diff --git a/.asf.yaml b/.asf.yaml
new file mode 100644
index 00000000000..ec94ae70c25
--- /dev/null
+++ b/.asf.yaml
@@ -0,0 +1,40 @@
+## Licensed under the terms of http://www.apache.org/licenses/LICENSE-2.0
+
+## See https://s.apache.org/asfyaml
+
+github:
+  description: "Apache Storm"
+  homepage: https://storm.apache.org/
+  protected_branches:
+    # Prevent force pushes to primary branches
+    master: {}
+  custom_subjects:
+    new_pr: "[PR] {title} ({repository})"
+    close_pr: "Re: [PR] {title} ({repository})"
+    comment_pr: "Re: [PR] {title} ({repository})"
+    diffcomment: "Re: [PR] {title} ({repository})"
+    merge_pr: "Re: [PR] {title} ({repository})"
+    new_issue: "[I] {title} ({repository})"
+    comment_issue: "Re: [I] {title} ({repository})"
+    close_issue: "Re: [I] {title} ({repository})"
+    catchall: "[GH] {title} ({repository})"
+    new_discussion: "[D] {title} ({repository})"
+    edit_discussion: "Re: [D] {title} ({repository})"
+    close_discussion: "Re: [D] {title} ({repository})"
+    close_discussion_with_comment: "Re: [D] {title} ({repository})"
+    reopen_discussion: "Re: [D] {title} ({repository})"
+    new_comment_discussion: "Re: [D] {title} ({repository})"
+    edit_comment_discussion: "Re: [D] {title} ({repository})"
+    delete_comment_discussion: "Re: [D] {title} ({repository})"
+  labels:
+    - apache
+    - storm
+    - streaming
+    - distributed
+notifications:
+  commits: commits@storm.apache.org
+  issues: issues@storm.apache.org
+  pullrequests_status: issues@storm.apache.org
+  pullrequests_comment: issues@storm.apache.org
+  # Send dependabot PRs to commits@ instead
+  pullrequests_bot_dependabot: commits@storm.apache.org
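
`.asf.yaml` is only evaluated server-side by ASF infrastructure, so a malformed file surfaces only after a push. A minimal local syntax check, assuming a Python 3 interpreter with PyYAML installed (neither is part of this PR):

```bash
# Hypothetical pre-push sanity check: fail loudly on YAML syntax errors.
python3 -c "import yaml; yaml.safe_load(open('.asf.yaml')); print('.asf.yaml parses ok')"
```
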
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000000..44c89df2e96
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Some storm-webapp logviewer tests require input files to have LF line endings due to byte counting.
+storm-webapp/src/test/resources/*.log.test text eol=lf
+
+# Convert the text on check-in and check-out (the conversion of all files has been done once on master and should be enforced from now on)
+* text=auto
+
+# There are reports of EOL conversion messing up PNG files, but that might have been a bug in git 2.10 only (see https://github.com/git/git/blob/master/Documentation/RelNotes/2.10.0.txt#L248 for details)
+*.png binary
+*.tar.gz binary
+*.zip binary
+*.tgz binary
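
To confirm that the rules above resolve the way the comments claim, `git check-attr` (part of stock git) reports the attributes applied to a path. The file names below are illustrative examples, not files in this PR:

```bash
# Query the attributes git would apply to sample paths covered by the rules.
git check-attr text eol -- storm-webapp/src/test/resources/example.log.test
git check-attr binary -- docs/images/example.png
```
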
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000000..2d6f2f5dc84
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,37 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+version: 2
+updates:
+
+  - package-ecosystem: maven
+    directory: "/examples"
+    schedule:
+      interval: monthly
+      time: '04:00'
+    open-pull-requests-limit: 20
+
+  - package-ecosystem: maven
+    directory: "/"
+    schedule:
+      interval: monthly
+      time: '04:00'
+    open-pull-requests-limit: 20
+
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "daily"
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 00000000000..1caca76e30d
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,7 @@
+## What is the purpose of the change
+
+*(Explain why we should have this change)*
+
+## How was the change tested
+
+*(Explain what tests you did to verify the code change)*
\ No newline at end of file
diff --git a/.github/workflows/maven.yaml b/.github/workflows/maven.yaml
new file mode 100644
index 00000000000..f0799767d33
--- /dev/null
+++ b/.github/workflows/maven.yaml
@@ -0,0 +1,68 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+name: Java CI with Maven
+
+on:
+  pull_request:
+    branches: [ "master" ]
+  push:
+    branches: [ "master" ]
+
+jobs:
+  build:
+    runs-on: ${{ matrix.os }}
+    continue-on-error: ${{ matrix.experimental }}
+    strategy:
+      matrix:
+        os: [ ubuntu-latest ]
+        java: [ 17, 21 ]
+        module: [ Client, Server, Core, External, Check-Updated-License-Files, Integration-Test ]
+        experimental: [false]
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
+        with:
+          path: ~/.m2/repository
+          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
+          restore-keys: |
+            ${{ runner.os }}-maven-
+      - name: Set up Python
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+        with:
+          python-version: '3.10'
+      - name: Set up Node
+        uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
+        with:
+          node-version: 20
+      - name: Set up Ruby
+        uses: ruby/setup-ruby@ed55d55e820a01da7d3e4863a8c51a61d73c3228 # v1.274.0
+        with:
+          ruby-version: '2.7'
+      - name: Set up JDK ${{ matrix.java }}
+        uses: actions/setup-java@f2beeb24e141e01a676f977032f5a29d81c9e27e # v5.1.0
+        with:
+          distribution: temurin
+          java-version: ${{ matrix.java }}
+      - name: Ensure a clean state without storm artifacts
+        run: rm -rf ~/.m2/repository/org/apache/storm
+      - name: Set up project dependencies
+        run: /bin/bash ./dev-tools/gitact/gitact-install.sh `pwd`
+      - name: Run build
+        run: |
+          export JDK_VERSION=${{ matrix.java }}
+          export USER=github
+          /bin/bash ./dev-tools/gitact/gitact-script.sh `pwd` ${{ matrix.module }};
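
The matrix expands to one job per (java, module) pair, and each job runs the same two dev-tools scripts a contributor can invoke directly. A rough local equivalent of a single matrix cell, assuming the scripts tolerate running outside GitHub Actions:

```bash
# Reproduce the 'Client' module build on JDK 17, mirroring the workflow's env.
export JDK_VERSION=17
export USER=local
/bin/bash ./dev-tools/gitact/gitact-install.sh "$(pwd)"
/bin/bash ./dev-tools/gitact/gitact-script.sh "$(pwd)" Client
```
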
diff --git a/.github/workflows/nightlies.yaml b/.github/workflows/nightlies.yaml
new file mode 100644
index 00000000000..753a8317c73
--- /dev/null
+++ b/.github/workflows/nightlies.yaml
@@ -0,0 +1,71 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+name: Publish Storm to nightlies.apache.org
+
+on:
+  workflow_dispatch: { }
+  schedule:
+    # every day 5min after midnight, UTC.
+    - cron: "5 0 * * *"
+
+jobs:
+  upload_to_nightlies:
+    if: github.repository == 'apache/storm'
+    name: Upload to Nightly Builds
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
+        with:
+          path: ~/.m2/repository
+          key: nightlies-maven-${{ hashFiles('**/pom.xml') }}
+          restore-keys: |
+            nightlies-maven-
+      - name: Set up Python
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+        with:
+          python-version: '3.10'
+      - name: Set up Node
+        uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
+        with:
+          node-version: 16
+      - name: Set up Ruby
+        uses: ruby/setup-ruby@ed55d55e820a01da7d3e4863a8c51a61d73c3228 # v1.274.0
+        with:
+          ruby-version: '2.7'
+      - name: Set up JDK 20
+        uses: actions/setup-java@f2beeb24e141e01a676f977032f5a29d81c9e27e # v5.1.0
+        with:
+          distribution: temurin
+          java-version: 20
+      - name: Ensure a clean state without storm artifacts
+        run: rm -rf ~/.m2/repository/org/apache/storm
+      - name: Set up project dependencies
+        run: /bin/bash ./dev-tools/gitact/gitact-install.sh `pwd`
+      - name: Package binaries
+        run: mvn package -f storm-dist/binary/pom.xml -Dgpg.skip
+      - name: Upload to nightlies.apache.org
+        uses: burnett01/rsync-deployments@0dc935cdecc5f5e571865e60d2a6cdc673704823
+        with:
+          switches: -avzh --update --delete --progress --include='*.zip' --include='*.tar.gz' --exclude='*'
+          path: storm-dist/binary/final-package/target/
+          remote_path: ${{ secrets.NIGHTLIES_RSYNC_PATH }}/storm/
+          remote_host: ${{ secrets.NIGHTLIES_RSYNC_HOST }}
+          remote_port: ${{ secrets.NIGHTLIES_RSYNC_PORT }}
+          remote_user: ${{ secrets.NIGHTLIES_RSYNC_USER }}
+          remote_key: ${{ secrets.NIGHTLIES_RSYNC_KEY }}
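
The upload step's switches transfer only the zip/tar.gz distribution artifacts and delete anything stale on the remote side. A sketch of the equivalent rsync invocation with `--dry-run` added so nothing is transferred (user, host, and path are placeholders for the secrets above):

```bash
rsync -avzh --update --delete --progress \
  --include='*.zip' --include='*.tar.gz' --exclude='*' --dry-run \
  storm-dist/binary/final-package/target/ \
  "$RSYNC_USER@$RSYNC_HOST:$RSYNC_PATH/storm/"
```
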
diff --git a/.github/workflows/snapshots.yaml b/.github/workflows/snapshots.yaml
new file mode 100644
index 00000000000..96205dc2f15
--- /dev/null
+++ b/.github/workflows/snapshots.yaml
@@ -0,0 +1,77 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+name: Publish Storm SNAPSHOTs
+
+on:
+  workflow_dispatch: { }
+  schedule:
+    # every day 5min after midnight, UTC.
+    - cron: "5 0 * * *"
+
+jobs:
+  upload_to_nightlies:
+    if: github.repository == 'apache/storm'
+    name: Publish Snapshots
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
+        with:
+          path: ~/.m2/repository
+          key: snapshots-maven-${{ hashFiles('**/pom.xml') }}
+          restore-keys: |
+            snapshots-maven-
+      - name: Set up Python
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+        with:
+          python-version: '3.10'
+      - name: Set up Node
+        uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
+        with:
+          node-version: 16
+      - name: Set up Ruby
+        uses: ruby/setup-ruby@ed55d55e820a01da7d3e4863a8c51a61d73c3228 # v1.274.0
+        with:
+          ruby-version: '2.7'
+      - name: Set up JDK 20
+        uses: actions/setup-java@f2beeb24e141e01a676f977032f5a29d81c9e27e # v5.1.0
+        with:
+          distribution: temurin
+          java-version: 20
+      - id: extract_version
+        name: Extract project version
+        shell: bash
+        run: |
+          VERSION=$(mvn exec:exec -Dexec.executable='echo' -Dexec.args='${project.version}' --non-recursive -q)
+          if [[ "$VERSION" == *"SNAPSHOT"* ]]; then
+            echo "snapshot=SNAPSHOT" >> $GITHUB_OUTPUT
+          fi
+      - name: Ensure a clean state without storm artifacts
+        if: steps.extract_version.outputs.snapshot == 'SNAPSHOT'
+        run: rm -rf ~/.m2/repository/org/apache/storm
+      - name: Set up project dependencies
+        if: steps.extract_version.outputs.snapshot == 'SNAPSHOT'
+        run: /bin/bash ./dev-tools/gitact/gitact-install.sh `pwd`
+      - name: Deploy Maven snapshots
+        if: steps.extract_version.outputs.snapshot == 'SNAPSHOT'
+        env:
+          ASF_USERNAME: ${{ secrets.NEXUS_USER }}
+          ASF_PASSWORD: ${{ secrets.NEXUS_PW }}
+        run: |
+          echo "<settings><servers><server><id>apache.snapshots.https</id><username>$ASF_USERNAME</username><password>$ASF_PASSWORD</password></server></servers></settings>" > settings.xml
+          mvn --settings settings.xml -U -B -e -fae -ntp -DskipTests -P !examples deploy
\ No newline at end of file
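
The `extract_version` step gates every later step: deployment only happens when the POM version contains SNAPSHOT. The same check can be run from a local clone, using the exact command from the workflow:

```bash
# Print the project version and report whether the deploy steps would run.
VERSION=$(mvn exec:exec -Dexec.executable='echo' -Dexec.args='${project.version}' --non-recursive -q)
echo "project version: $VERSION"
if [[ "$VERSION" == *SNAPSHOT* ]]; then echo "deploy steps would run"; fi
```
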
diff --git a/.gitignore b/.gitignore
index b575a0284b3..3786af5c6b9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,9 +2,7 @@
 /classes
 /lib
 deploy/lib
-deploy/logs
 .emacs-project
-*.jar
 bin/jzmq
 .DS_Store
 deploy/classes
@@ -21,12 +19,48 @@ _release
 *.zip
 *.tar.gz
 .lein-deps-sum
-*.iml
 target
 /.project/
 /.lein-plugins/
+#.*
+!/.gitignore
+_site
+dependency-reduced-pom.xml
+metastore_db
+build
+/docs/javadocs
+*.class
+
+# logs
+logs
+*.log
+
+# Eclipse
+.settings/
+.project
+.classpath
+
+# Intellij
+*.iml
 *.ipr
 *.iws
 .idea
-.*
-!/.gitignore
+
+# Package Files
+*.jar
+*.war
+*.ear
+
+# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
+hs_err_pid*
+
+# ignore vagrant files
+/integration-test/config/.vagrant/
+
+# Test jars for zip slip
+!/storm-server/src/test/resources/evil-path-traversal.jar
+!/storm-server/src/test/resources/evil-path-traversal-resources.jar
+
+m2
+install.txt
+install-shade.txt
\ No newline at end of file
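
A quick way to confirm the new patterns behave as intended, including the negated entries that keep the zip-slip test jars tracked (`git check-ignore` ships with git; the paths other than the test jars are illustrative):

```bash
# -v prints the matching .gitignore rule for each ignored path.
git check-ignore -v logs/worker.log target install.txt
git check-ignore -v storm-server/src/test/resources/evil-path-traversal.jar \
  || echo "not ignored (negated rule), as intended"
```
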
diff --git a/CHANGELOG.md b/CHANGELOG.md
deleted file mode 100644
index 2117367ff27..00000000000
--- a/CHANGELOG.md
+++ /dev/null
@@ -1,426 +0,0 @@
-## 0.9.2-incubating (unreleased)
- * STORM-205: Add REST API To Storm UI
- * STORM-326: tasks send duplicate metrics
- * STORM-331: Update the Kafka dependency of storm-kafka to 0.8.1.1
- * STORM-308: Add support for config_value to {supervisor,nimbus,ui,drpc,logviewer} childopts
- * STORM-309: storm-starter Readme: windows documentation update
- * STORM-318: update storm-kafka to use apache curator-2.4.0
- * STORM-303: storm-kafka reliability improvements
- * STORM-233: Removed inline heartbeat to nimbus to avoid workers being killed when under heavy ZK load
- * STORM-267: fix package name of LoggingMetricsConsumer in storm.yaml.example
- * STORM-265: upgrade to clojure 1.5.1
- * STORM-232: ship JNI dependencies with the topology jar
- * STORM-295: Add storm configuration to define JAVA_HOME
- * STORM-138: Pluggable serialization for multilang
- * STORM-264: Removes references to the deprecated topology.optimize
- * STORM-245: implement Stream.localOrShuffle() for trident
- * STORM-317: Add SECURITY.md to release binaries
- * STORM-310: Change Twitter authentication
- * STORM-305: Create developer documentation
- * STORM-280: storm unit tests are failing on windows
- * STORM-298: Logback file does not include full path for metrics appender fileNamePattern
- * STORM-316: added validation to registermetrics to have timebucketSizeInSecs >= 1
- * STORM-315: Added progress bar when submitting topology
- * STORM-214: Windows: storm.cmd does not properly handle multiple -c arguments
- * STORM-306: Add security documentation
- * STORM-302: Fix Indentation for pom.xml in storm-dist
- * STORM-235: Registering a null metric should blow up early
- * STORM-113: making thrift usage thread safe for local cluster
- * STORM-223: use safe parsing for reading YAML
- * STORM-238: LICENSE and NOTICE files are duplicated in storm-core jar
- * STORM-276: Add support for logviewer in storm.cmd.
- * STORM-286: Use URLEncoder#encode with the encoding specified.
- * STORM-296: Storm kafka unit tests are failing on windows
- * STORM-291: upgrade http-client to 4.3.3
- * STORM-252: Upgrade curator to latest version
- * STORM-294: Commas not escaped in command line
- * STORM-287: Fix the positioning of documentation strings in clojure code
- * STORM-290: Fix a log binding conflict caused by curator dependencies
- * STORM-289: Fix Trident DRPC memory leak
- * STORM-173: Treat command line "-c" option number config values as such
- * STORM-194: Support list of strings in *.worker.childopts, handle spaces
- * STORM-288: Fixes version spelling in pom.xml
- * STORM-208: Add storm-kafka as an external module
- * STORM-285: Fix storm-core shade plugin config
- * STORM-12: reduce thread usage of netty transport
- * STORM-281: fix and issue with config parsing that could lead to leaking file descriptors
- * STORM-196: When JVM_OPTS are set, storm jar fails to detect storm.jar from environment
- * STORM-260: Fix a potential race condition with simulated time in Storm's unit tests
- * STORM-258: Update commons-io version to 2.4
- * STORM-270: don't package .clj files in release jars.
- * STORM-273: Error while running storm topologies on Windows using "storm jar"
- * STROM-247: Replace links to github resources in storm script
- * STORM-263: Update Kryo version to 2.21+
- * STORM-187: Fix Netty error "java.lang.IllegalArgumentException: timeout value is negative"
- * STORM-186: fix float secs to millis long convertion
- * STORM-70: Upgrade to ZK-3.4.5 and curator-1.3.3
- * STORM-146: Unit test regression when storm is compiled with 3.4.5 zookeeper
-
-## 0.9.1-incubating
-* Fix to prevent Nimbus from hanging if random data is sent to nimbus thrift port
-* Improved support for running on Windows platforms
-* Removed dependency on the `unzip` binary
-* Switch build system from Leiningen to Maven
-* STORM-1: Replaced 0MQ as the default transport with Netty.
-* STORM-181: Nimbus now validates topology configuration when topologies are submitted (thanks d2r)
-* STORM-182: Storm UI now includes tooltips to document fields (thanks d2r)
-* STORM-195: `dependency-reduced-pom.xml` should be in `.gitignore`
-* STORM-13: Change license on README.md
-* STORM-2: Move all dependencies off of storm-specific builds
-* STORM-159: Upload separate source and javadoc jars for maven use
-* STORM-149: `storm jar` doesn't work on Windows
-
-## 0.9.0.1
-* Update build configuration to force compatibility with Java 1.6
-
-## 0.9.0
-* Fixed a netty client issue where sleep times for reconnection could be negative (thanks brndnmtthws)
-* Fixed an issue that would cause storm-netty unit tests to fail
-
-## 0.9.0-rc3
-* Added configuration to limit ShellBolt internal _pendingWrites queue length (thanks xiaokang)
-* Fixed a a netty client issue where sleep times for reconnection could be negative (thanks brndnmtthws)
-* Fixed a display issue with system stats in Storm UI (thanks d2r)
-* Nimbus now does worker heartbeat timeout checks as soon as heartbeats are updated (thanks d2r)
-* The logviewer now determines log file location by examining the logback configuration (thanks strongh)
-* Allow tick tuples to work with the system bolt (thanks xumingming)
-* Add default configuration values for the netty transport and the ability to configure the number of worker threads (thanks revans2)
-* Added timeout to unit tests to prevent a situation where tests would hang indefinitely (thanks d2r)
-* Fixed and issue in the system bolt where local mode would not be detected accurately (thanks miofthena)
-
-## 0.9.0-rc2
-
-* Fixed `storm jar` command to work properly when STORM_JAR_JVM_OPTS is not specified (thanks roadkill001)
-
-## 0.9.0-rc1
-
- * All logging now done with slf4j
- * Replaced log4j logging system with logback
 * Logs are now limited to 1GB per worker (configurable via logging configuration file)
- * Build upgraded to leiningen 2.0
- * Revamped Trident spout interfaces to support more dynamic spouts, such as a spout who reads from a changing set of brokers
- * How tuples are serialized is now pluggable (thanks anfeng)
- * Added blowfish encryption based tuple serialization (thanks anfeng)
- * Have storm fall back to installed storm.yaml (thanks revans2)
- * Improve error message when Storm detects bundled storm.yaml to show the URL's for offending resources (thanks revans2)
- * Nimbus throws NotAliveException instead of FileNotFoundException from various query methods when topology is no longer alive (thanks revans2)
- * Escape HTML and Javascript appropriately in Storm UI (thanks d2r)
- * Storm's Zookeeper client now uses bounded exponential backoff strategy on failures
- * Automatically drain and log error stream of multilang subprocesses
- * Append component name to thread name of running executors so that logs are easier to read
- * Messaging system used for passing messages between workers is now pluggable (thanks anfeng)
- * Netty implementation of messaging (thanks anfeng)
- * Include topology id, worker port, and worker id in properties for worker processes, useful for logging (thanks d2r)
- * Tick tuples can now be scheduled using floating point seconds (thanks tscurtu)
- * Added log viewer daemon and links from UI to logviewers (thanks xiaokang)
- * DRPC server childopts now configurable (thanks strongh)
- * Default number of ackers to number of workers, instead of just one (thanks lyogavin)
- * Validate that Storm configs are of proper types/format/structure (thanks d2r)
- * FixedBatchSpout will now replay batches appropriately on batch failure (thanks ptgoetz)
- * Can set JAR_JVM_OPTS env variable to add jvm options when calling 'storm jar' (thanks srmelody)
- * Throw error if batch id for transaction is behind the batch id in the opaque value (thanks mrflip)
- * Sort topologies by name in UI (thanks jaked)
- * Added LoggingMetricsConsumer to log all metrics to a file, by default not enabled (thanks mrflip)
- * Add prepare(Map conf) method to TopologyValidator (thanks ankitoshniwal)
- * Bug fix: Supervisor provides full path to workers to logging config rather than relative path (thanks revans2)
- * Bug fix: Call ReducerAggregator#init properly when used within persistentAggregate (thanks lorcan)
- * Bug fix: Set component-specific configs correctly for Trident spouts
-
-## 0.8.3 (unreleased)
-
- * Revert zmq layer to not rely on multipart messages to fix issue reported by some users
- * Bug fix: Fix TransactionalMap and OpaqueMap to correctly do multiple updates to the same key in the same batch
- * Bug fix: Fix race condition between supervisor and Nimbus that could lead to stormconf.ser errors and infinite crashing of supervisor
- * Bug fix: Fix default scheduler to always reassign workers in a constrained topology when there are dead executors
- * Bug fix: Fix memory leak in Trident LRUMemoryMapState due to concurrency issue with LRUMap (thanks jasonjckn)
- * Bug fix: Properly ignore NoNodeExists exceptions when deleting old transaction states
-
-## 0.8.2
-
- * Added backtype.storm.scheduler.IsolationScheduler. This lets you run topologies that are completely isolated at the machine level. Configure Nimbus to isolate certain topologies, and how many machines to give to each of those topologies, with the isolation.scheduler.machines config in Nimbus's storm.yaml. Topologies run on the cluster that are not listed there will share whatever remaining machines there are on the cluster after machines are allocated to the listed topologies.
- * Storm UI now uses nimbus.host to find Nimbus rather than always using localhost (thanks Frostman)
- * Added report-error! to Clojure DSL
- * Automatically throttle errors sent to Zookeeper/Storm UI when too many are reported in a time interval (all errors are still logged) Configured with TOPOLOGY_MAX_ERROR_REPORT_PER_INTERVAL and TOPOLOGY_ERROR_THROTTLE_INTERVAL_SECS
- * Kryo instance used for serialization can now be controlled via IKryoFactory interface and TOPOLOGY_KRYO_FACTORY config
- * Add ability to plug in custom code into Nimbus to allow/disallow topologies to be submitted via NIMBUS_TOPOLOGY_VALIDATOR config
- * Added TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS config to control how often a batch can be emitted in a Trident topology. Defaults to 500 milliseconds. This is used to prevent too much load from being placed on Zookeeper in the case that batches are being processed super quickly.
- * Log any topology submissions errors in nimbus.log
- * Add static helpers in Config when using regular maps
- * Make Trident much more memory efficient during failures by immediately removing state for failed attempts when a more recent attempt is seen
- * Add ability to name portions of a Trident computation and have those names appear in the Storm UI
- * Show Nimbus and topology configurations through Storm UI (thanks rnfein)
- * Added ITupleCollection interface for TridentState's and TupleCollectionGet QueryFunction for getting the full contents of a state. MemoryMapState and LRUMemoryMapState implement this
- * Can now submit a topology in inactive state. Storm will wait to call open/prepare on the spouts/bolts until it is first activated.
- * Can now activate, deactive, rebalance, and kill topologies from the Storm UI (thanks Frostman)
- * Can now use --config option to override which yaml file from ~/.storm to use for the config (thanks tjun)
- * Redesigned the pluggable resource scheduler (INimbus, ISupervisor) interfaces to allow for much simpler integrations
- * Added prepare method to IScheduler
- * Added "throws Exception" to TestJob interface
- * Added reportError to multilang protocol and updated Python and Ruby adapters to use it (thanks Lazyshot)
- * Number tuples executed now tracked and shown in Storm UI
- * Added ReportedFailedException which causes a batch to fail without killing worker and reports the error to the UI
- * Execute latency now tracked and shown in Storm UI
- * Adding testTuple methods for easily creating Tuple instances to Testing API (thanks xumingming)
- * Trident now throws an error during construction of a topology when try to select fields that don't exist in a stream (thanks xumingming)
- * Compute the capacity of a bolt based on execute latency and #executed over last 10 minutes and display in UI
- * Storm UI displays exception instead of blank page when there's an error rendering the page (thanks Frostman)
- * Added MultiScheme interface (thanks sritchie)
- * Added MockTridentTuple for testing (thanks emblem)
- * Add whitelist methods to Cluster to allow only a subset of hosts to be revealed as available slots
- * Updated Trident Debug filter to take in an identifier to use when logging (thanks emblem)
- * Number of DRPC server worker threads now customizable (thanks xiaokang)
- * DRPC server now uses a bounded queue for requests to prevent being overloaded with requests (thanks xiaokang)
- * Add __hash__ method to all generated Python Thrift objects so that Python code can read Nimbus stats which use Thrift objects as dict keys
- * Bug fix: Fix for bug that could cause topology to hang when ZMQ blocks sending to a worker that got reassigned
- * Bug fix: Fix deadlock bug due to variant of dining philosophers problem. Spouts now use an overflow buffer to prevent blocking and guarantee that it can consume the incoming queue of acks/fails.
- * Bug fix: Fix race condition in supervisor that would lead to supervisor continuously crashing due to not finding "stormconf.ser" file for an already killed topology
- * Bug fix: bin/storm script now displays a helpful error message when an invalid command is specified
- * Bug fix: fixed NPE when emitting during emit method of Aggregator
- * Bug fix: URLs with periods in them in Storm UI now route correctly
- * Bug fix: Fix occasional cascading worker crashes due when a worker dies due to not removing connections from connection cache appropriately
-
-## 0.8.1
-
- * Exposed Storm's unit testing facilities via the backtype.storm.Testing class. Notable functions are Testing/withLocalCluster and Testing/completeTopology (thanks xumingming)
- * Implemented pluggable spout wait strategy that is invoked when a spout emits nothing from nextTuple or when a spout hits the MAX_SPOUT_PENDING limit
- * Spouts now have a default wait strategy of a 1 millisecond sleep
- * Changed log level of "Failed message" logging to DEBUG
- * Deprecated LinearDRPCTopologyBuilder, TimeCacheMap, and transactional topologies
- * During "storm jar", whether topology is already running or not is checked before submitting jar to save time (thanks jasonjckn)
- * Added BaseMultiReducer class to Trident that provides empty implementations of prepare and cleanup
- * Added Negate builtin operation to reverse a Filter
- * Added topology.kryo.decorators config that allows functions to be plugged in to customize Kryo (thanks jasonjckn)
- * Enable message timeouts when using LocalCluster
- * Multilang subprocesses can set "need_task_ids" to false when emitting tuples to tell Storm not to send task ids back (performance optimization) (thanks barrywhart)
- * Add contains method on Tuple (thanks okapies)
- * Added ISchemableSpout interface
- * Bug fix: When an item is consumed off an internal buffer, the entry on the buffer is nulled to allow GC to happen on that data
- * Bug fix: Helper class for Trident MapStates now clear their read cache when a new commit happens, preventing updates from spilling over from a failed batch attempt to the next attempt
- * Bug fix: Fix NonTransactionalMap to take in an IBackingMap for regular values rather than TransactionalValue (thanks sjoerdmulder)
- * Bug fix: Fix NPE when no input fields given for regular Aggregator
- * Bug fix: Fix IndexOutOfBoundsExceptions when a bolt for global aggregation had a parallelism greater than 1 (possible with splitting, stateQuerying, and multiReduce)
- * Bug fix: Fix "fields size" error that would sometimes occur when splitting a stream with multiple eaches
- * Bug fix: Fix bug where a committer spout (including opaque spouts) could cause Trident batches to fail
- * Bug fix: Fix Trident bug where multiple groupings on same stream would cause tuples to be duplicated to all consumers
- * Bug fix: Fixed error when repartitioning stream twice in a row without any operations in between
- * Bug fix: Fix rare bug in supervisor where it would continuously fail to clean up workers because the worker was already partially cleaned up
- * Bug fix: Fix emitDirect in storm.py
-
-## 0.8.0
-
- * Added Trident, the new high-level abstraction for intermixing high throughput, stateful stream processing with low-latency distributed querying
- * Added executor abstraction between workers and tasks. Workers = processes, executors = threads that run many tasks from the same spout or bolt.
- * Pluggable scheduler (thanks xumingming)
- * Eliminate explicit storage of task->component in Zookeeper
- * Number of workers can be dynamically changed at runtime through rebalance command and -n switch
- * Number of executors for a component can be dynamically changed at runtime through rebalance command and -e switch (multiple -e switches allowed)
- * Use worker heartbeats instead of task heartbeats (thanks xumingming)
- * UI performance for topologies with many executors/tasks much faster due to optimized usage of Zookeeper (10x improvement)
- * Added button to show/hide system stats (e.g., acker component and stream stats) from the Storm UI (thanks xumingming)
- * Stats are tracked on a per-executor basis instead of per-task basis
- * Major optimization for unreliable spouts and unanchored tuples (will use far less CPU)
- * Revamped internals of Storm to use LMAX disruptor for internal queuing. Dramatic reductions in contention and CPU usage.
- * Numerous micro-optimizations all throughout the codebase to reduce CPU usage.
- * Optimized internals of Storm to use much fewer threads - two fewer threads per spout and one fewer thread per acker.
- * Removed error method from task hooks (to be re-added at a later time)
- * Validate that subscriptions come from valid components and streams, and if it's a field grouping that the schema is correct (thanks xumingming)
- * MemoryTransactionalSpout now works in cluster mode
- * Only track errors on a component by component basis to reduce the amount stored in zookeeper (to speed up UI). A side effect of this change is the removal of the task page in the UI.
- * Add TOPOLOGY-TICK-TUPLE-FREQ-SECS config to have Storm automatically send "tick" tuples to a bolt's execute method coming from the __system component and __tick stream at the configured frequency. Meant to be used as a component-specific configuration.
- * Upgrade Kryo to v2.17
- * Tuple is now an interface and is much cleaner. The Clojure DSL helpers have been moved to TupleImpl
- * Added shared worker resources. Storm provides a shared ExecutorService thread pool by default. The number of threads in the pool can be configured with topology.worker.shared.thread.pool.size
- * Improve CustomStreamGrouping interface to make it more flexible by providing more information
- * Enhanced INimbus interface to allow for forced schedulers and better integration with global scheduler
- * Added assigned method to ISupervisor so it knows exactly what's running and not running
- * Custom serializers can now have one of four constructors: (), (Kryo), (Class), or (Kryo, Class)
- * Disallow ":", ".", and "\" from topology names
- * Errors in multilang subprocesses that go to stderr will be captured and logged to the worker logs (thanks vinodc)
- * Workers detect and warn for missing outbound connections from assignment, drop messages for which there's no outbound connection
- * Zookeeper connection timeout is now configurable (via storm.zookeeper.connection.timeout config)
- * Storm is now less aggressive about halting process when there are Zookeeper errors, preferring to wait until client calls return exceptions.
- * Can configure Zookeeper authentication for Storm's Zookeeper clients via "storm.zookeeper.auth.scheme" and "storm.zookeeper.auth.payload" configs
- * Supervisors only download code for topologies assigned to them
- * Include task id information in task hooks (thanks velvia)
- * Use execvp to spawn daemons (replaces the python launcher process) (thanks ept)
- * Expanded INimbus/ISupervisor interfaces to provide more information (used in Storm/Mesos integration)
- * Bug fix: Realize task ids when worker heartbeats to supervisor. Some users were hitting deserialization problems here in very rare cases (thanks herberteuler)
- * Bug fix: Fix bug where a topology's status would get corrupted to true if nimbus is restarted while status is rebalancing
-
-## 0.7.4
-
- * Bug fix: Disallow slashes in topology names since it causes Nimbus to break by affecting local filesystem and zookeeper paths
- * Bug fix: Prevent slow loading tasks from causing worker timeouts by launching the heartbeat thread before tasks are loaded
-
-## 0.7.3
-
- * Changed debug level of "Failed message" logging to DEBUG
- * Bug fix: Fixed critical regression in 0.7.2 that could cause workers to timeout to the supervisors or to Nimbus. 0.7.2 moved all system tasks to the same thread, so if one took a long time it would block the other critical tasks. Now different system tasks run on different threads.
-
-## 0.7.2
-
-NOTE: The change from 0.7.0 in which OutputCollector no longer assumes immutable inputs has been reverted to support optimized sending of tuples to colocated tasks
-
- * Messages sent to colocated tasks are sent in-memory, skipping serialization (useful in conjunction with localOrShuffle grouping) (thanks xumingming)
- * Upgrade to Clojure 1.4 (thanks sorenmacbeth)
- * Exposed INimbus and ISupervisor interfaces for running Storm on different resource frameworks (like Mesos).
- * Can override the hostname that supervisors report using "storm.local.hostname" config.
- * Make request timeout within DRPC server configurable via "drpc.request.timeout.secs"
- * Added "storm list" command to show running topologies at the command line (thanks xumingming)
- * Storm UI displays the release version (thanks xumingming)
- * Added reportError to BasicOutputCollector
- * Added reportError to BatchOutputCollector
- * Added close method to OpaqueTransactionalSpout coordinator
- * Added "storm dev-zookeeper" command for launching a local zookeeper server. Useful for testing a one node Storm cluster locally. Zookeeper dir configured with "dev.zookeeper.path"
- * Use new style classes for Python multilang adapter (thanks hellp)
- * Added "storm version" command
- * Heavily refactored and simplified the supervisor and worker code
- * Improved error message when duplicate config files found on classpath
- * Print the host and port of Nimbus when using the storm command line client
- * Include as much of currently read output as possible when pipe to subprocess is broken in multilang components
- * Lower supervisor worker start timeout to 120 seconds
- * More debug logging in supervisor
- * "nohup" no longer used by supervisor to launch workers (unnecessary)
- * Throw helpful error message if StormSubmitter used without using storm client script
- * Add Values class as a default serialization
- * Bug fix: give absolute piddir to subprocesses (so that relative paths can be used for storm local dir)
- * Bug fix: Fixed critical bug in transactional topologies where a batch would be considered successful even if the batch didn't finish
- * Bug fix: Fixed critical bug in opaque transactional topologies that would lead to duplicate messages when using pipelining
- * Bug fix: Workers will now die properly if a ShellBolt subprocess dies (thanks tomo)
- * Bug fix: Hide the BasicOutputCollector#getOutputter method, since it shouldn't be a publicly available method
- * Bug fix: Zookeeper in local mode now always gets an unused port. This will eliminate conflicts with other local mode processes or other Zookeeper instances on a local machine. (thanks xumingming)
- * Bug fix: Fixed NPE in CoordinatedBolt it tuples emitted, acked, or failed for a request id that has already timed out. (thanks xumingming)
- * Bug fix: UI no longer errors for topologies with no assigned tasks (thanks xumingming)
- * Bug fix: emitDirect on SpoutOutputCollector now works
- * Bug fix: Fixed NPE when giving null parallelism hint for spout in TransactionalTopologyBuilder (thanks xumingming)
-
-## 0.7.1
-
- * Implemented shell spout (thanks tomo)
- * Shell bolts can now asynchronously emit/ack messages (thanks tomo)
- * Added hooks for when a tuple is emitted, acked, or failed in bolts or spouts.
- * Added activate and deactivate lifecycle methods on spouts. Spouts start off deactivated.
- * Added isReady method to ITransactionalSpout$Coordinator to give the ability to delay the creation of new batches
- * Generalized CustomStreamGrouping to return the target tasks rather than the indices. Also parameterized custom groupings with TopologyContext. (not backwards compatible)
- * Added localOrShuffle grouping that will send to tasks in the same worker process if possible, or do a shuffle grouping otherwise.
- * Removed parameter from TopologyContext#maxTopologyMessageTimeout (simplification).
- * Storm now automatically sets TOPOLOGY_NAME in the config passed to the bolts and spouts to the name of the topology.
- * Added TOPOLOGY_AUTO_TASK_HOOKS config to automatically add hooks into every spout/bolt for the topology.
- * Added ability to override configs at the command line. These config definitions have the highest priority.
- * Error thrown if invalid (not json-serializable) topology conf used.
- * bin/storm script can now be symlinked (thanks gabrielgrant)
- * Socket timeout for DRPCClient is now configurable
- * Added getThisWorkerPort() method to TopologyContext
- * Added better error checking in Fields (thanks git2samus)
- * Improved Clojure DSL to allow destructuring in bolt/spout methods
- * Added Nimbus stats methods to LocalCluster (thanks KasperMadsen)
- * Added rebalance, activate, deactivate, and killTopologyWithOpts methods to LocalCluster
- * Added custom stream groupings to LinearDRPC API
- * Simplify multilang protocol to use json for all messages (thanks tomoj)
- * Bug fix: Fixed string encoding in ShellBolt protocol to be UTF-8 (thanks nicoo)
- * Bug fix: Fixed race condition in FeederSpout that could lead to dropped messages
- * Bug fix: Quoted arguments with spaces now work properly with storm client script
- * Bug fix: Workers start properly when topology name has spaces
- * Bug fix: UI works properly when there are spaces in topology or spout/bolt names (thanks xiaokang)
- * Bug fix: Tuple$Seq now returns correct count (thanks travisfw)
-
-## 0.7.0
-
- * Transactional topologies: a new higher level abstraction that enables exactly-once messaging semantics for most computations. Documented on the wiki.
- * Component-specific configurations: Can now set configurations on a per-spout or per-bolt basis.
- * New batch bolt abstraction that simplifies the processing of batches in DRPC or transactional topologies. A new batch bolt is created per batch and they are automatically cleaned up.
- * Introduction of base classes for various bolt and spout types. These base classes are in the backtype.storm.topology.base package and provide empty implementations for commonly unused methods
- * CoordinatedBolt generalized to handle non-linear topologies. This will make it easy to implement a non-linear DRPC topology abstraction.
- * Can customize the JVM options for Storm UI with new ui.childopts config
- * BigIntegers are now serializable by default
- * All bolts/spouts now emit a system stream (id "__system"). Currently it only emits startup events, but may emit other events in the future.
- * Optimized tuple trees for batch processing in DRPC and transactional topologies. Only the coordination tuples are anchored. OutputCollector#fail still works because CoordinatedBolt will propagate the fail to all other tuples in the batch.
- * CoordinatedBolt moved to backtype.storm.coordination package
- * Clojure test framework significantly more composable
- * Massive internal refactorings and simplifications, including changes to the Thrift definition for storm topologies.
- * Optimized acking system. Bolts with zero or more than one consumer used to send an additional ack message. Now those are no longer sent.
- * Changed interface of CustomStreamGrouping to receive a List rather than a Tuple.
- * Added "storm.zookeeper.retry.times" and "storm.zookeeper.retry.interval" configs (thanks killme2008)
- * Added "storm help" and "storm help {cmd}" to storm script (thanks kachayev)
- * Logging now always goes to logs/ in the Storm directory, regardless of where you launched the daemon (thanks haitaoyao)
- * Improved Clojure DSL: can emit maps and Tuples implement the appropriate interfaces to integrate with Clojure's seq functions (thanks schleyfox)
- * Added "ui.childopts" config (thanks ddillinger)
- * Bug fix: OutputCollector no longer assumes immutable inputs [NOTE: this was reverted in 0.7.2 because it conflicts with sending tuples to colocated tasks without serialization]
- * Bug fix: DRPC topologies now throw a proper error when no DRPC servers are configured instead of NPE (thanks danharvey)
- * Bug fix: Fix local mode so multiple topologies can be run on one LocalCluster
- * Bug fix: "storm supervisor" now uses supervisor.childopts instead of nimbus.childopts (thanks ddillinger)
- * Bug fix: supervisor.childopts and nimbus.childopts can now contain whitespace. Previously only the first token was taken from the string
- * Bug fix: Make TopologyContext "getThisTaskIndex" and "getComponentTasks" consistent
- * Bug fix: Fix NoNodeException that would pop up with task heartbeating under heavy load
- * Bug fix: Catch InterruptedExceptions appropriately in local mode so shutdown always works properly
-
-## 0.6.2
-
- * Automatically delete old files in Nimbus's inbox. Configurable with "nimbus.cleanup.inbox.freq.secs" and "nimbus.inbox.jar.expiration.secs"
- * Redirect System.out and System.err to log4j
- * Added "topology.worker.child.opts" config, for topology-configurable worker options.
- * Use Netflix's Curator library for Zookeeper communication. Workers now reconnect to Zookeeper rather than crash when there's a disconnection.
- * Bug fix: DRPC server no longer hangs with too many concurrent requests. DPRC server now requires two ports: "drpc.port" and "drpc.invocations.port"
- * Bug fix: Multilang resources are now extracted from the relevant jar on the classpath when appropriate. Previously an error would be thrown if the resources/ dir was in a jar in local mode.
- * Bug fix: Fix race condition in unit testing where time simulation fails to detect that Storm cluster is waiting due to threads that are not alive
- * Bug fix: Fix deadlock in Nimbus that could be triggered by a kill command.
-
-## 0.6.1
-
- * storm client "activate" and "deactivate" commands
- * storm client "rebalance" command
- * Nimbus will automatically detect and cleanup corrupt topologies (this would previously give an error of the form "file storm...ser cannot be found").
- * "storm" client will not run unless it's being used from a release.
- * Topology jar path now passed in using a java property rather than an environment variable.
- * LD\_LIBRARY\_PATH environment variable is now set on worker processes appropriately.
- * Replaced jvyaml with snakeyaml. UTF-8 YAML files should now work properly.
- * Upgraded httpclient, httpcore, and commons-codec dependencies.
-
-## 0.6.0
-
- * New serialization system based on Kryo
- * Component and stream ids are now strings
- * Pluggable stream groupings
- * Storm now chooses an unused port for Zookeeper in local mode instead of crashing when 2181 was in use.
- * Better support for defining topologies in non-JVM languages. The Thrift structure for topologies now allows you to specify components using a Java class name and a list of arguments to that class's constructor.
- * Bug fix: errors during the preparation phase of spouts or bolts will be reported to the Storm UI
- * Bug fix: Fixed bugs related to LinearDRPC topologies where the last bolt implements FinishedCallback
- * Bug fix: String greater than 64K will now serialize properly
- * Generalized type of anchors in OutputCollector methods to Collection from List.
- * Improved logging throughout.
- * In the "worker.childopts" config, %ID% will be replaced by the worker port.
- * Significant internal refactorings to clean up the codebase.
-
-## 0.5.4
-
- * LinearDRPCTopologyBuilder, a polished DRPC implementation,
 * Improved custom serialization support. no longer need to provide "token" ids.
- * Fallback on Java serialization by default. Can be turned off by setting "topology.fall.back.on.java.serialization" to false.
- * Improved "storm kill" command. Can override the wait time with "-w" flag.
- * Display topology status in Storm UI
- * Changed Thrift namespace to avoid conflicts
- * Better error messages throughout
- * Storm UI port is configurable through "ui.port"
- * Minor improvements to Clojure DSL
-
-## 0.5.3
-
- * Nimbus and supervisor daemons can now share a local dir.
- * Greatly improved Clojure DSL for creating topologies.
- * Increased the default timeouts for startup of workers and tasks.
- * Added the commands "localconfvalue", "remoteconfvalue", and "repl" to the storm script.
- * Better error message when "storm jar" can't find the nimbus host in the configuration.
-
-## 0.5.2
-
- * No longer need any native dependencies to run Storm in local mode. Storm now uses a pure Java messaging system in local mode
- * Fixed logging configurations so that logging is no longer suppressed when including the Storm release jars on the classpath in local mode.
-
-## 0.5.1
-
- * Changed ISerialization's "accept" interface to not annotate the Class with the generic type
- * Made Config class implement Map and added helper methods for setting common configs
-
-## 0.5.0
-
- * Initial release!
diff --git a/DEPENDENCY-LICENSES b/DEPENDENCY-LICENSES
new file mode 100644
index 00000000000..4ed12e9b4cd
--- /dev/null
+++ b/DEPENDENCY-LICENSES
@@ -0,0 +1,454 @@
+
+List of third-party dependencies grouped by their license type.
+
+
+    3-Clause BSD License
+
+        * Kryo (com.esotericsoftware:kryo:5.6.2 - https://github.com/EsotericSoftware/kryo/kryo)
+        * MinLog (com.esotericsoftware:minlog:1.3.1 - https://github.com/EsotericSoftware/minlog)
+        * ReflectASM (com.esotericsoftware:reflectasm:1.11.9 - https://github.com/EsotericSoftware/reflectasm)
+
+    AL 2.0, GPL v2, MPL 2.0
+
+        * RabbitMQ Java Client (com.rabbitmq:amqp-client:5.26.0 - https://www.rabbitmq.com)
+
+    Apache License
+
+        * carbonite (org.clojars.bipinprasad:carbonite:1.6.0 - https://github.com/bipinprasad/carbonite)
+
+    Apache License, Version 2.0
+
+        * ActiveMQ :: Client (org.apache.activemq:activemq-client:6.2.0 - http://activemq.apache.org/activemq-client)
+        * Annotations for Metrics (io.dropwizard.metrics:metrics-annotation:4.2.37 - https://metrics.dropwizard.io/metrics-annotation)
+        * Apache Avro (org.apache.avro:avro:1.12.1 - https://avro.apache.org)
+        * Apache Commons CLI (commons-cli:commons-cli:1.11.0 - https://commons.apache.org/proper/commons-cli/)
+        * Apache Commons Codec (commons-codec:commons-codec:1.20.0 - https://commons.apache.org/proper/commons-codec/)
+        * Apache Commons Collections (commons-collections:commons-collections:3.2.2 - http://commons.apache.org/collections/)
+        * Apache Commons Collections (org.apache.commons:commons-collections4:4.5.0 - https://commons.apache.org/proper/commons-collections/)
+        * Apache Commons Compress (org.apache.commons:commons-compress:1.28.0 - https://commons.apache.org/proper/commons-compress/)
+        * Apache Commons Configuration (org.apache.commons:commons-configuration2:2.13.0 - https://commons.apache.org/proper/commons-configuration/)
+        * Apache Commons Crypto (org.apache.commons:commons-crypto:1.1.0 - https://commons.apache.org/proper/commons-crypto/)
+        * Apache Commons Exec (org.apache.commons:commons-exec:1.5.0 - https://commons.apache.org/proper/commons-exec/)
+        * Apache Commons FileUpload (commons-fileupload:commons-fileupload:1.6.0 - https://commons.apache.org/proper/commons-fileupload/)
+        * Apache Commons IO (commons-io:commons-io:2.21.0 - https://commons.apache.org/proper/commons-io/)
+        * Apache Commons Lang (org.apache.commons:commons-lang3:3.20.0 - https://commons.apache.org/proper/commons-lang/)
+        * Apache Commons Logging (commons-logging:commons-logging:1.3.5 - https://commons.apache.org/proper/commons-logging/)
+        * Apache Commons Math (org.apache.commons:commons-math3:3.6.1 - http://commons.apache.org/proper/commons-math/)
+        * Apache Commons Net (commons-net:commons-net:3.9.0 - https://commons.apache.org/proper/commons-net/)
+        * Apache Commons Pool (org.apache.commons:commons-pool2:2.12.1 - https://commons.apache.org/proper/commons-pool/)
+        * Apache Commons Text (org.apache.commons:commons-text:1.14.0 - https://commons.apache.org/proper/commons-text)
+        * Apache Hadoop Annotations (org.apache.hadoop:hadoop-annotations:3.4.2 - no url defined)
+        * Apache Hadoop Auth (org.apache.hadoop:hadoop-auth:3.4.2 - no url defined)
+        * Apache Hadoop Client API (org.apache.hadoop:hadoop-client-api:3.4.2 - no url defined)
+        * Apache Hadoop Client Runtime (org.apache.hadoop:hadoop-client-runtime:3.4.2 - no url defined)
+        * Apache Hadoop Common (org.apache.hadoop:hadoop-common:3.4.2 - no url defined)
+        * Apache Hadoop shaded Guava (org.apache.hadoop.thirdparty:hadoop-shaded-guava:1.4.0 - https://www.apache.org/hadoop-thirdparty/hadoop-shaded-guava/)
+        * Apache Hadoop shaded Protobuf (org.apache.hadoop.thirdparty:hadoop-shaded-protobuf_3_25:1.4.0 - https://www.apache.org/hadoop-thirdparty/hadoop-shaded-protobuf_3_25/)
+        * Apache HBase - Client (org.apache.hbase:hbase-client:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-client)
+        * Apache HBase - Common (org.apache.hbase:hbase-common:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-common)
+        * Apache HBase - Hadoop Compatibility (org.apache.hbase:hbase-hadoop-compat:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-hadoop-compat)
+        * Apache HBase - Hadoop Two Compatibility (org.apache.hbase:hbase-hadoop2-compat:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-hadoop2-compat)
+        * Apache HBase - Logging (org.apache.hbase:hbase-logging:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-logging)
+        * Apache HBase - Metrics API (org.apache.hbase:hbase-metrics-api:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-metrics-api)
+        * Apache HBase - Metrics Implementation (org.apache.hbase:hbase-metrics:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-metrics)
+        * Apache HBase Patched and Relocated (Shaded) Protobuf (org.apache.hbase.thirdparty:hbase-shaded-protobuf:4.1.12 - https://hbase.apache.org/hbase-shaded-protobuf)
+        * Apache HBase - Protocol (org.apache.hbase:hbase-protocol:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-protocol)
+        * Apache HBase Relocated (Shaded) GSON Libs (org.apache.hbase.thirdparty:hbase-shaded-gson:4.1.12 - https://hbase.apache.org/hbase-shaded-gson)
+        * Apache HBase Relocated (Shaded) Netty Libs (org.apache.hbase.thirdparty:hbase-shaded-netty:4.1.12 - https://hbase.apache.org/hbase-shaded-netty)
+        * Apache HBase Relocated (Shaded) Third-party Miscellaneous Libs (org.apache.hbase.thirdparty:hbase-shaded-miscellaneous:4.1.12 - https://hbase.apache.org/hbase-shaded-miscellaneous)
+        * Apache HBase - Shaded Protocol (org.apache.hbase:hbase-protocol-shaded:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-protocol-shaded)
+        * Apache HBase Unsafe Wrapper (org.apache.hbase.thirdparty:hbase-unsafe:4.1.12 - https://hbase.apache.org/hbase-unsafe)
+        * Apache HttpClient (org.apache.httpcomponents:httpclient:4.5.14 - http://hc.apache.org/httpcomponents-client-ga)
+        * Apache HttpClient (org.apache.httpcomponents.client5:httpclient5:5.2.1 - https://hc.apache.org/httpcomponents-client-5.0.x/5.2.1/httpclient5/)
+        * Apache HttpComponents Core HTTP/1.1 (org.apache.httpcomponents.core5:httpcore5:5.2 - https://hc.apache.org/httpcomponents-core-5.2.x/5.2/httpcore5/)
+        * Apache HttpComponents Core HTTP/2 (org.apache.httpcomponents.core5:httpcore5-h2:5.2 - https://hc.apache.org/httpcomponents-core-5.2.x/5.2/httpcore5-h2/)
+        * Apache HttpCore (org.apache.httpcomponents:httpcore:4.4.16 - http://hc.apache.org/httpcomponents-core-ga)
+        * Apache Kafka (org.apache.kafka:kafka-clients:4.1.1 - https://kafka.apache.org)
+        * Apache Log4j API (org.apache.logging.log4j:log4j-api:2.25.2 - https://logging.apache.org/log4j/2.x/)
+        * Apache Log4j Core (org.apache.logging.log4j:log4j-core:2.25.2 - https://logging.apache.org/log4j/2.x/)
+        * Apache Maven Shade Plugin (org.apache.maven.plugins:maven-shade-plugin:3.6.1 - https://maven.apache.org/plugins/maven-shade-plugin/)
+        * Apache Maven Shared Utils (org.apache.maven.shared:maven-shared-utils:3.2.1 - https://maven.apache.org/shared/maven-shared-utils/)
+        * Apache Thrift (org.apache.thrift:libthrift:0.22.0 - https://thrift.apache.org/)
+        * Apache XBean :: Spring (org.apache.xbean:xbean-spring:4.28 - http://geronimo.apache.org/maven/xbean/4.28/xbean-spring)
+        * Apache Yetus - Audience Annotations (org.apache.yetus:audience-annotations:0.12.0 - https://yetus.apache.org/audience-annotations)
+        * Apache Yetus - Audience Annotations (org.apache.yetus:audience-annotations:0.13.0 - https://yetus.apache.org/audience-annotations)
+        * Apache ZooKeeper - Jute (org.apache.zookeeper:zookeeper-jute:3.9.4 - http://zookeeper.apache.org/zookeeper-jute)
+        * Apache ZooKeeper - Server (org.apache.zookeeper:zookeeper:3.9.4 - http://zookeeper.apache.org/zookeeper)
+        * ASM based accessors helper used by json-smart (net.minidev:accessors-smart:2.6.0 - https://urielch.github.io/)
+        * Auto Common Libraries (com.google.auto:auto-common:1.2.1 - https://github.com/google/auto/tree/master/common)
+        * AutoService (com.google.auto.service:auto-service-annotations:1.1.1 - https://github.com/google/auto/tree/main/service)
+        * AutoService Processor (com.google.auto.service:auto-service:1.1.1 - https://github.com/google/auto/tree/main/service)
+        * Caffeine cache (com.github.ben-manes.caffeine:caffeine:3.2.3 - https://github.com/ben-manes/caffeine)
+        * CDI APIs (javax.enterprise:cdi-api:1.0 - http://www.seamframework.org/Weld/cdi-api)
+        * chill-java (com.twitter:chill-java:0.9.5 - https://github.com/twitter/chill)
+        * ClassMate (com.fasterxml:classmate:1.7.0 - https://github.com/FasterXML/java-classmate)
+        * com.helger:profiler (com.helger:profiler:1.1.1 - https://github.com/phax/profiler)
+        * Commons Lang (commons-lang:commons-lang:2.6 - http://commons.apache.org/lang/)
+        * Curator Client (org.apache.curator:curator-client:5.9.0 - https://curator.apache.org/curator-client)
+        * Curator Framework (org.apache.curator:curator-framework:5.9.0 - https://curator.apache.org/curator-framework)
+        * Curator Recipes (org.apache.curator:curator-recipes:5.9.0 - https://curator.apache.org/curator-recipes)
+        * Dropwizard (io.dropwizard:dropwizard-core:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-core)
+        * Dropwizard Asset Bundle (io.dropwizard:dropwizard-assets:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-assets)
+        * Dropwizard Configuration Support (io.dropwizard:dropwizard-configuration:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-configuration)
+        * Dropwizard Health Checking Support (io.dropwizard:dropwizard-health:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-health)
+        * Dropwizard Jackson Support (io.dropwizard:dropwizard-jackson:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-jackson)
+        * Dropwizard Jersey Support (io.dropwizard:dropwizard-jersey:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-jersey)
+        * Dropwizard Jetty Support (io.dropwizard:dropwizard-jetty:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-jetty)
+        * Dropwizard Lifecycle Support (io.dropwizard:dropwizard-lifecycle:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-lifecycle)
+        * Dropwizard Logging Support (io.dropwizard:dropwizard-logging:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-logging)
+        * Dropwizard Metrics Support (io.dropwizard:dropwizard-metrics:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-metrics)
+        * Dropwizard Request Logging Support (io.dropwizard:dropwizard-request-logging:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-request-logging)
+        * Dropwizard Servlet Support (io.dropwizard:dropwizard-servlets:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-servlets)
+        * Dropwizard Utility Classes (io.dropwizard:dropwizard-util:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-util)
+        * Dropwizard Validation Support (io.dropwizard:dropwizard-validation:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-validation)
+        * error-prone annotations (com.google.errorprone:error_prone_annotations:2.45.0 - https://errorprone.info/error_prone_annotations)
+        * FindBugs-jsr305 (com.google.code.findbugs:jsr305:3.0.2 - http://findbugs.sourceforge.net/)
+        * Google Guice - Core Library (com.google.inject:guice:4.2.1 - https://github.com/google/guice/guice)
+        * Graphite Integration for Metrics (io.dropwizard.metrics:metrics-graphite:4.2.37 - https://metrics.dropwizard.io/metrics-graphite)
+        * Gson (com.google.code.gson:gson:2.13.2 - https://github.com/google/gson)
+        * Guava: Google Core Libraries for Java (com.google.guava:guava:33.5.0-jre - https://github.com/google/guava)
+        * Guava InternalFutureFailureAccess and InternalFutures (com.google.guava:failureaccess:1.0.3 - https://github.com/google/guava/failureaccess)
+        * Guava ListenableFuture only (com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava - https://github.com/google/guava/listenablefuture)
+        * hawtbuf (org.fusesource.hawtbuf:hawtbuf:1.11 - http://hawtbuf.fusesource.org/hawtbuf)
+        * Hibernate Validator Engine (org.hibernate.validator:hibernate-validator:8.0.3.Final - https://hibernate.org/validator)
+        * HikariCP (com.zaxxer:HikariCP:7.0.2 - https://github.com/brettwooldridge/HikariCP)
+        * j2html (com.j2html:j2html:1.6.0 - http://j2html.com)
+        * J2ObjC Annotations (com.google.j2objc:j2objc-annotations:3.1 - https://github.com/google/j2objc/)
+        * Jackson-annotations (com.fasterxml.jackson.core:jackson-annotations:2.20 - https://github.com/FasterXML/jackson)
+        * Jackson-core (com.fasterxml.jackson.core:jackson-core:2.20.1 - https://github.com/FasterXML/jackson-core)
+        * jackson-databind (com.fasterxml.jackson.core:jackson-databind:2.20.1 - https://github.com/FasterXML/jackson)
+        * Jackson dataformat: Smile (com.fasterxml.jackson.dataformat:jackson-dataformat-smile:2.20.1 - https://github.com/FasterXML/jackson-dataformats-binary)
+        * Jackson-dataformat-YAML (com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:2.20.1 - https://github.com/FasterXML/jackson-dataformats-text)
+        * Jackson datatype: Guava (com.fasterxml.jackson.datatype:jackson-datatype-guava:2.20.1 - https://github.com/FasterXML/jackson-datatypes-collections)
+        * Jackson datatype: jdk8 (com.fasterxml.jackson.datatype:jackson-datatype-jdk8:2.20.1 - https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jdk8)
+        * Jackson datatype: JSR310 (com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.20.1 - https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jsr310)
+        * Jackson Integration for Metrics (io.dropwizard.metrics:metrics-json:4.2.37 - https://metrics.dropwizard.io/metrics-json)
+        * Jackson Jakarta-RS: base (com.fasterxml.jackson.jakarta.rs:jackson-jakarta-rs-base:2.20.1 - https://github.com/FasterXML/jackson-jakarta-rs-providers/jackson-jakarta-rs-base)
+        * Jackson Jakarta-RS: JSON (com.fasterxml.jackson.jakarta.rs:jackson-jakarta-rs-json-provider:2.20.1 - https://github.com/FasterXML/jackson-jakarta-rs-providers/jackson-jakarta-rs-json-provider)
+        * Jackson module: Blackbird (com.fasterxml.jackson.module:jackson-module-blackbird:2.20.1 - https://github.com/FasterXML/jackson-modules-base)
+        * Jackson module: Jakarta XML Bind Annotations (jakarta.xml.bind) (com.fasterxml.jackson.module:jackson-module-jakarta-xmlbind-annotations:2.20.1 - https://github.com/FasterXML/jackson-modules-base)
+        * Jackson-module-parameter-names (com.fasterxml.jackson.module:jackson-module-parameter-names:2.20.1 - https://github.com/FasterXML/jackson-modules-java8/jackson-module-parameter-names)
+        * Jakarta Bean Validation API (jakarta.validation:jakarta.validation-api:3.0.2 - https://beanvalidation.org)
+        * Jakarta Dependency Injection (jakarta.inject:jakarta.inject-api:2.0.1 - https://github.com/eclipse-ee4j/injection-api)
+        * Java Concurrency Tools Core Library (org.jctools:jctools-core:4.0.5 - https://github.com/JCTools)
+        * javax.inject (javax.inject:javax.inject:1 - http://code.google.com/p/atinject/)
+        * JBoss Logging 3 (org.jboss.logging:jboss-logging:3.6.1.Final - http://www.jboss.org)
+        * JCIP Annotations under Apache License (com.github.stephenc.jcip:jcip-annotations:1.0-1 - http://stephenc.github.com/jcip-annotations)
+        * JCL 1.2 implemented over SLF4J (org.slf4j:jcl-over-slf4j:2.0.17 - http://www.slf4j.org)
+        * jdependency (org.vafer:jdependency:2.10 - http://github.com/tcurdt/jdependency)
+        * Jettison (org.codehaus.jettison:jettison:1.5.4 - https://github.com/jettison-json/jettison)
+        * JSON Small and Fast Parser (net.minidev:json-smart:2.6.0 - https://urielch.github.io/)
+        * JSpecify annotations (org.jspecify:jspecify:1.0.0 - http://jspecify.org/)
+        * JVM Integration for Metrics (io.dropwizard.metrics:metrics-jvm:4.2.37 - https://metrics.dropwizard.io/metrics-jvm)
+        * Kerby ASN1 Project (org.apache.kerby:kerby-asn1:2.0.3 - https://directory.apache.org/kerby/kerby-common/kerby-asn1)
+        * Kerby Config (org.apache.kerby:kerby-config:2.0.3 - https://directory.apache.org/kerby/kerby-common/kerby-config)
+        * Kerby-kerb core (org.apache.kerby:kerb-core:2.0.3 - https://directory.apache.org/kerby/kerby-kerb/kerb-core)
+        * Kerby-kerb Crypto (org.apache.kerby:kerb-crypto:2.0.3 - https://directory.apache.org/kerby/kerby-kerb/kerb-crypto)
+        * Kerby-kerb Util (org.apache.kerby:kerb-util:2.0.3 - https://directory.apache.org/kerby/kerby-kerb/kerb-util)
+        * Kerby PKIX Project (org.apache.kerby:kerby-pkix:2.0.3 - https://directory.apache.org/kerby/kerby-pkix)
+        * Kerby Util (org.apache.kerby:kerby-util:2.0.3 - https://directory.apache.org/kerby/kerby-common/kerby-util)
+        * Log4j Implemented Over SLF4J (org.slf4j:log4j-over-slf4j:2.0.17 - http://www.slf4j.org)
+        * LZ4 and xxHash (org.lz4:lz4-java:1.8.0 - https://github.com/lz4/lz4-java)
+        * Maven Artifact (org.apache.maven:maven-artifact:3.8.1 - https://maven.apache.org/ref/3.8.1/maven-artifact/)
+        * Maven Artifact (org.apache.maven:maven-artifact:3.9.11 - https://maven.apache.org/ref/3.9.11/maven-artifact/)
+        * Maven Artifact Resolver API
(org.apache.maven.resolver:maven-resolver-api:1.6.2 - https://maven.apache.org/resolver/maven-resolver-api/) + * Maven Artifact Resolver API (org.apache.maven.resolver:maven-resolver-api:1.9.24 - https://maven.apache.org/resolver/maven-resolver-api/) + * Maven Artifact Resolver Connector Basic (org.apache.maven.resolver:maven-resolver-connector-basic:1.9.24 - https://maven.apache.org/resolver/maven-resolver-connector-basic/) + * Maven Artifact Resolver Implementation (org.apache.maven.resolver:maven-resolver-impl:1.6.2 - https://maven.apache.org/resolver/maven-resolver-impl/) + * Maven Artifact Resolver Implementation (org.apache.maven.resolver:maven-resolver-impl:1.9.24 - https://maven.apache.org/resolver/maven-resolver-impl/) + * Maven Artifact Resolver Named Locks (org.apache.maven.resolver:maven-resolver-named-locks:1.9.24 - https://maven.apache.org/resolver/maven-resolver-named-locks/) + * Maven Artifact Resolver Provider (org.apache.maven:maven-resolver-provider:3.8.1 - https://maven.apache.org/ref/3.8.1/maven-resolver-provider/) + * Maven Artifact Resolver Provider (org.apache.maven:maven-resolver-provider:3.9.11 - https://maven.apache.org/ref/3.9.11/maven-resolver-provider/) + * Maven Artifact Resolver SPI (org.apache.maven.resolver:maven-resolver-spi:1.6.2 - https://maven.apache.org/resolver/maven-resolver-spi/) + * Maven Artifact Resolver SPI (org.apache.maven.resolver:maven-resolver-spi:1.9.24 - https://maven.apache.org/resolver/maven-resolver-spi/) + * Maven Artifact Resolver Transport File (org.apache.maven.resolver:maven-resolver-transport-file:1.9.24 - https://maven.apache.org/resolver/maven-resolver-transport-file/) + * Maven Artifact Resolver Transport HTTP (org.apache.maven.resolver:maven-resolver-transport-http:1.9.24 - https://maven.apache.org/resolver/maven-resolver-transport-http/) + * Maven Artifact Resolver Utilities (org.apache.maven.resolver:maven-resolver-util:1.6.2 - https://maven.apache.org/resolver/maven-resolver-util/) + * Maven Artifact Resolver Utilities (org.apache.maven.resolver:maven-resolver-util:1.9.24 - https://maven.apache.org/resolver/maven-resolver-util/) + * Maven Builder Support (org.apache.maven:maven-builder-support:3.8.1 - https://maven.apache.org/ref/3.8.1/maven-builder-support/) + * Maven Builder Support (org.apache.maven:maven-builder-support:3.9.11 - https://maven.apache.org/ref/3.9.11/maven-builder-support/) + * Maven Core (org.apache.maven:maven-core:3.8.1 - https://maven.apache.org/ref/3.8.1/maven-core/) + * Maven Model (org.apache.maven:maven-model:3.8.1 - https://maven.apache.org/ref/3.8.1/maven-model/) + * Maven Model (org.apache.maven:maven-model:3.9.11 - https://maven.apache.org/ref/3.9.11/maven-model/) + * Maven Model Builder (org.apache.maven:maven-model-builder:3.8.1 - https://maven.apache.org/ref/3.8.1/maven-model-builder/) + * Maven Model Builder (org.apache.maven:maven-model-builder:3.9.11 - https://maven.apache.org/ref/3.9.11/maven-model-builder/) + * Maven Plugin API (org.apache.maven:maven-plugin-api:3.8.1 - https://maven.apache.org/ref/3.8.1/maven-plugin-api/) + * Maven Plugin Tools Java Annotations (org.apache.maven.plugin-tools:maven-plugin-annotations:3.8.1 - https://maven.apache.org/plugin-tools/maven-plugin-annotations) + * Maven Repository Metadata Model (org.apache.maven:maven-repository-metadata:3.8.1 - https://maven.apache.org/ref/3.8.1/maven-repository-metadata/) + * Maven Repository Metadata Model (org.apache.maven:maven-repository-metadata:3.9.11 - 
https://maven.apache.org/ref/3.9.11/maven-repository-metadata/) + * Maven Settings (org.apache.maven:maven-settings:3.8.1 - https://maven.apache.org/ref/3.8.1/maven-settings/) + * Maven Settings Builder (org.apache.maven:maven-settings-builder:3.8.1 - https://maven.apache.org/ref/3.8.1/maven-settings-builder/) + * Metrics Core (io.dropwizard.metrics:metrics-core:4.2.37 - https://metrics.dropwizard.io/metrics-core) + * Metrics Health Checks (io.dropwizard.metrics:metrics-healthchecks:4.2.37 - https://metrics.dropwizard.io/metrics-healthchecks) + * Metrics Integration for Jersey 3.x (io.dropwizard.metrics:metrics-jersey3:4.2.37 - https://metrics.dropwizard.io/metrics-jersey3) + * Metrics Integration for Jetty 12.x and higher (io.dropwizard.metrics:metrics-jetty12:4.2.37 - https://metrics.dropwizard.io/metrics-jetty12) + * Metrics Integration for Jetty 12.x and higher with Jakarta EE 10 (io.dropwizard.metrics:metrics-jetty12-ee10:4.2.37 - https://metrics.dropwizard.io/metrics-jetty12-ee10) + * Metrics Integration for Logback (io.dropwizard.metrics:metrics-logback:4.2.37 - https://metrics.dropwizard.io/metrics-logback) + * Metrics Integration with JMX (io.dropwizard.metrics:metrics-jmx:4.2.37 - https://metrics.dropwizard.io/metrics-jmx) + * Metrics Utility Jakarta Servlets (io.dropwizard.metrics:metrics-jakarta-servlets:4.2.37 - https://metrics.dropwizard.io/metrics-jakarta-servlets) + * micrometer-commons (io.micrometer:micrometer-commons:1.14.12 - https://github.com/micrometer-metrics/micrometer) + * micrometer-observation (io.micrometer:micrometer-observation:1.14.12 - https://github.com/micrometer-metrics/micrometer) + * Netty/All-in-One (io.netty:netty-all:4.2.7.Final - https://netty.io/netty-all/) + * Netty/Buffer (io.netty:netty-buffer:4.2.7.Final - https://netty.io/netty-buffer/) + * Netty/Codec/Base (io.netty:netty-codec-base:4.2.7.Final - https://netty.io/netty-codec-base/) + * Netty/Codec/Classes/Quic (io.netty:netty-codec-classes-quic:4.2.7.Final - https://netty.io/netty-codec-classes-quic/) + * Netty/Codec/Compression (io.netty:netty-codec-compression:4.2.7.Final - https://netty.io/netty-codec-compression/) + * Netty/Codec/DNS (io.netty:netty-codec-dns:4.2.7.Final - https://netty.io/netty-codec-dns/) + * Netty/Codec/HAProxy (io.netty:netty-codec-haproxy:4.2.7.Final - https://netty.io/netty-codec-haproxy/) + * Netty/Codec/HTTP (io.netty:netty-codec-http:4.2.7.Final - https://netty.io/netty-codec-http/) + * Netty/Codec/HTTP2 (io.netty:netty-codec-http2:4.2.7.Final - https://netty.io/netty-codec-http2/) + * Netty/Codec/Http3 (io.netty:netty-codec-http3:4.2.7.Final - https://netty.io/netty-codec-http3/) + * Netty/Codec/Marshalling (io.netty:netty-codec-marshalling:4.2.7.Final - https://netty.io/netty-codec-marshalling/) + * Netty/Codec/Memcache (io.netty:netty-codec-memcache:4.2.7.Final - https://netty.io/netty-codec-memcache/) + * Netty/Codec/MQTT (io.netty:netty-codec-mqtt:4.2.7.Final - https://netty.io/netty-codec-mqtt/) + * Netty/Codec/Native/Quic (io.netty:netty-codec-native-quic:4.2.7.Final - https://netty.io/netty-codec-native-quic/) + * Netty/Codec/Protobuf (io.netty:netty-codec-protobuf:4.2.7.Final - https://netty.io/netty-codec-protobuf/) + * Netty/Codec/Redis (io.netty:netty-codec-redis:4.2.7.Final - https://netty.io/netty-codec-redis/) + * Netty/Codec/SMTP (io.netty:netty-codec-smtp:4.2.7.Final - https://netty.io/netty-codec-smtp/) + * Netty/Codec/Socks (io.netty:netty-codec-socks:4.2.7.Final - https://netty.io/netty-codec-socks/) + * Netty/Codec/Stomp 
(io.netty:netty-codec-stomp:4.2.7.Final - https://netty.io/netty-codec-stomp/) + * Netty/Codec/XML (io.netty:netty-codec-xml:4.2.7.Final - https://netty.io/netty-codec-xml/) + * Netty/Codec (io.netty:netty-codec:4.2.7.Final - https://netty.io/netty-codec/) + * Netty/Common (io.netty:netty-common:4.2.7.Final - https://netty.io/netty-common/) + * Netty/Handler/Proxy (io.netty:netty-handler-proxy:4.2.7.Final - https://netty.io/netty-handler-proxy/) + * Netty/Handler/Ssl/Ocsp (io.netty:netty-handler-ssl-ocsp:4.2.7.Final - https://netty.io/netty-handler-ssl-ocsp/) + * Netty/Handler (io.netty:netty-handler:4.2.7.Final - https://netty.io/netty-handler/) + * Netty/Resolver/DNS/Classes/MacOS (io.netty:netty-resolver-dns-classes-macos:4.2.7.Final - https://netty.io/netty-resolver-dns-classes-macos/) + * Netty/Resolver/DNS/Native/MacOS (io.netty:netty-resolver-dns-native-macos:4.2.7.Final - https://netty.io/netty-resolver-dns-native-macos/) + * Netty/Resolver/DNS (io.netty:netty-resolver-dns:4.2.7.Final - https://netty.io/netty-resolver-dns/) + * Netty/Resolver (io.netty:netty-resolver:4.2.7.Final - https://netty.io/netty-resolver/) + * Netty/TomcatNative [BoringSSL - Static] (io.netty:netty-tcnative-boringssl-static:2.0.74.Final - https://github.com/netty/netty-tcnative/netty-tcnative-boringssl-static/) + * Netty/TomcatNative [OpenSSL - Classes] (io.netty:netty-tcnative-classes:2.0.74.Final - https://github.com/netty/netty-tcnative/netty-tcnative-classes/) + * Netty/TomcatNative [OpenSSL - Dynamic] (io.netty:netty-tcnative:2.0.74.Final - https://github.com/netty/netty-tcnative/netty-tcnative/) + * Netty/Transport/Classes/Epoll (io.netty:netty-transport-classes-epoll:4.2.7.Final - https://netty.io/netty-transport-classes-epoll/) + * Netty/Transport/Classes/io_uring (io.netty:netty-transport-classes-io_uring:4.2.7.Final - https://netty.io/netty-transport-classes-io_uring/) + * Netty/Transport/Classes/KQueue (io.netty:netty-transport-classes-kqueue:4.2.7.Final - https://netty.io/netty-transport-classes-kqueue/) + * Netty/Transport/Native/Epoll (io.netty:netty-transport-native-epoll:4.2.7.Final - https://netty.io/netty-transport-native-epoll/) + * Netty/Transport/Native/io_uring (io.netty:netty-transport-native-io_uring:4.2.7.Final - https://netty.io/netty-transport-native-io_uring/) + * Netty/Transport/Native/KQueue (io.netty:netty-transport-native-kqueue:4.2.7.Final - https://netty.io/netty-transport-native-kqueue/) + * Netty/Transport/Native/Unix/Common (io.netty:netty-transport-native-unix-common:4.2.7.Final - https://netty.io/netty-transport-native-unix-common/) + * Netty/Transport/RXTX (io.netty:netty-transport-rxtx:4.2.7.Final - https://netty.io/netty-transport-rxtx/) + * Netty/Transport/SCTP (io.netty:netty-transport-sctp:4.2.7.Final - https://netty.io/netty-transport-sctp/) + * Netty/Transport/UDT (io.netty:netty-transport-udt:4.2.7.Final - https://netty.io/netty-transport-udt/) + * Netty/Transport (io.netty:netty-transport:4.2.7.Final - https://netty.io/netty-transport/) + * Nimbus JOSE+JWT (com.nimbusds:nimbus-jose-jwt:9.37.2 - https://bitbucket.org/connect2id/nimbus-jose-jwt) + * Objenesis (org.objenesis:objenesis:3.4 - https://objenesis.org/objenesis) + * OpenTelemetry Java (io.opentelemetry:opentelemetry-api:1.49.0 - https://github.com/open-telemetry/opentelemetry-java) + * OpenTelemetry Java (io.opentelemetry:opentelemetry-context:1.49.0 - https://github.com/open-telemetry/opentelemetry-java) + * OpenTelemetry Semantic Conventions Java 
(io.opentelemetry.semconv:opentelemetry-semconv:1.29.0-alpha - https://github.com/open-telemetry/semantic-conventions-java) + * Plexus :: Component Annotations (org.codehaus.plexus:plexus-component-annotations:2.1.0 - http://codehaus-plexus.github.io/plexus-containers/plexus-component-annotations/) + * Plexus Cipher: encryption/decryption Component (org.sonatype.plexus:plexus-cipher:1.4 - http://spice.sonatype.org/plexus-cipher) + * Plexus Classworlds (org.codehaus.plexus:plexus-classworlds:2.6.0 - http://codehaus-plexus.github.io/plexus-classworlds/) + * Plexus Common Utilities (org.codehaus.plexus:plexus-utils:3.2.1 - http://codehaus-plexus.github.io/plexus-utils/) + * Plexus Common Utilities (org.codehaus.plexus:plexus-utils:3.5.1 - https://codehaus-plexus.github.io/plexus-utils/) + * Plexus Common Utilities (org.codehaus.plexus:plexus-utils:3.6.0 - https://codehaus-plexus.github.io/plexus-utils/) + * Plexus Interpolation API (org.codehaus.plexus:plexus-interpolation:1.25 - http://codehaus-plexus.github.io/plexus-interpolation/) + * Plexus Interpolation API (org.codehaus.plexus:plexus-interpolation:1.28 - https://codehaus-plexus.github.io/plexus-pom/plexus-interpolation/) + * Plexus Security Dispatcher Component (org.sonatype.plexus:plexus-sec-dispatcher:1.4 - http://spice.sonatype.org/plexus-sec-dispatcher) + * Prometheus Metrics Config (io.prometheus:prometheus-metrics-config:1.4.3 - http://github.com/prometheus/client_java/client_java/prometheus-metrics-config) + * Prometheus Metrics Core (io.prometheus:prometheus-metrics-core:1.4.3 - http://github.com/prometheus/client_java/client_java/prometheus-metrics-core) + * Prometheus Metrics Exporter - Common (io.prometheus:prometheus-metrics-exporter-common:1.4.3 - http://github.com/prometheus/client_java/client_java/prometheus-metrics-exporter-common) + * Prometheus Metrics Exporter - Pushgateway (io.prometheus:prometheus-metrics-exporter-pushgateway:1.4.3 - http://github.com/prometheus/client_java/client_java/prometheus-metrics-exporter-pushgateway) + * Prometheus Metrics Exposition Formats (io.prometheus:prometheus-metrics-exposition-formats:1.4.3 - http://github.com/prometheus/client_java/client_java/prometheus-metrics-exposition-formats) + * Prometheus Metrics Exposition Text Formats (io.prometheus:prometheus-metrics-exposition-textformats:1.4.3 - http://github.com/prometheus/client_java/client_java/prometheus-metrics-exposition-textformats) + * Prometheus Metrics Model (io.prometheus:prometheus-metrics-model:1.4.3 - http://github.com/prometheus/client_java/client_java/prometheus-metrics-model) + * Prometheus Metrics Tracer Common (io.prometheus:prometheus-metrics-tracer-common:1.4.3 - http://github.com/prometheus/client_java/client_java/prometheus-metrics-tracer/prometheus-metrics-tracer-common) + * Prometheus Metrics Tracer Initializer (io.prometheus:prometheus-metrics-tracer-initializer:1.4.3 - http://github.com/prometheus/client_java/client_java/prometheus-metrics-tracer/prometheus-metrics-tracer-initializer) + * Prometheus Metrics Tracer OpenTelemetry (io.prometheus:prometheus-metrics-tracer-otel:1.4.3 - http://github.com/prometheus/client_java/client_java/prometheus-metrics-tracer/prometheus-metrics-tracer-otel) + * Prometheus Metrics Tracer OpenTelemetry Agent (io.prometheus:prometheus-metrics-tracer-otel-agent:1.4.3 - http://github.com/prometheus/client_java/client_java/prometheus-metrics-tracer/prometheus-metrics-tracer-otel-agent) + * sigar (org.fusesource:sigar:1.6.4 - http://fusesource.com/sigar/) + * SLF4J 2 Provider for 
Log4j API (org.apache.logging.log4j:log4j-slf4j2-impl:2.25.2 - https://logging.apache.org/log4j/2.x/) + * SnakeYAML (org.yaml:snakeyaml:2.2 - https://bitbucket.org/snakeyaml/snakeyaml) + * snappy-java (org.xerial.snappy:snappy-java:1.1.10.8 - https://github.com/xerial/snappy-java) + * Spring AOP (org.springframework:spring-aop:6.2.12 - https://github.com/spring-projects/spring-framework) + * Spring Beans (org.springframework:spring-beans:6.2.12 - https://github.com/spring-projects/spring-framework) + * Spring Commons Logging Bridge (org.springframework:spring-jcl:6.2.12 - https://github.com/spring-projects/spring-framework) + * Spring Context (org.springframework:spring-context:6.2.12 - https://github.com/spring-projects/spring-framework) + * Spring Core (org.springframework:spring-core:6.2.12 - https://github.com/spring-projects/spring-framework) + * Spring Expression Language (SpEL) (org.springframework:spring-expression:6.2.12 - https://github.com/spring-projects/spring-framework) + * Spring JMS (org.springframework:spring-jms:6.2.12 - https://github.com/spring-projects/spring-framework) + * Spring Messaging (org.springframework:spring-messaging:6.2.12 - https://github.com/spring-projects/spring-framework) + * Spring Transaction (org.springframework:spring-tx:6.2.12 - https://github.com/spring-projects/spring-framework) + * Throttling Appender (io.dropwizard.logback:logback-throttling-appender:1.5.3 - https://github.com/dropwizard/logback-throttling-appender/) + * Woodstox (com.fasterxml.woodstox:woodstox-core:7.1.1 - https://github.com/FasterXML/woodstox) + + Apache License, Version 2.0, BSD 2-Clause, Eclipse Distribution License, Version 1.0, Eclipse Public License, Version 2.0, jQuery license, MIT License, Modified BSD, Public Domain, The GNU General Public License (GPL), Version 2, With Classpath Exception, W3C license + + * jersey-container-grizzly2-http (org.glassfish.jersey.containers:jersey-container-grizzly2-http:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-container-grizzly2-http) + * jersey-container-grizzly2-servlet (org.glassfish.jersey.containers:jersey-container-grizzly2-servlet:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-container-grizzly2-servlet) + * jersey-container-servlet (org.glassfish.jersey.containers:jersey-container-servlet:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-container-servlet) + * jersey-container-servlet-core (org.glassfish.jersey.containers:jersey-container-servlet-core:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-container-servlet-core) + * jersey-core-client (org.glassfish.jersey.core:jersey-client:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/jersey-client) + * jersey-ext-bean-validation (org.glassfish.jersey.ext:jersey-bean-validation:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-bean-validation) + * jersey-ext-metainf-services (org.glassfish.jersey.ext:jersey-metainf-services:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-metainf-services) + * jersey-inject-hk2 (org.glassfish.jersey.inject:jersey-hk2:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-hk2) + + Apache License, Version 2.0, Eclipse Public License, Version 2.0 + + * Core :: HTTP (org.eclipse.jetty:jetty-http:12.1.4 - https://jetty.org/jetty-core/jetty-http) + * Core :: IO (org.eclipse.jetty:jetty-io:12.1.4 - https://jetty.org/jetty-core/jetty-io) + * Core :: Security 
(org.eclipse.jetty:jetty-security:12.1.4 - https://jetty.org/jetty-core/jetty-security) + * Core :: Server (org.eclipse.jetty:jetty-server:12.1.4 - https://jetty.org/jetty-core/jetty-server) + * Core :: Sessions (org.eclipse.jetty:jetty-session:12.1.4 - https://jetty.org/jetty-core/jetty-session) + * Core :: Utilities (org.eclipse.jetty:jetty-util:12.1.4 - https://jetty.org/jetty-core/jetty-util) + * EE10 :: Servlet (org.eclipse.jetty.ee10:jetty-ee10-servlet:12.1.4 - https://jetty.org/jetty-ee10/jetty-ee10-servlet) + * EE10 :: Utility Servlets and Filters (org.eclipse.jetty.ee10:jetty-ee10-servlets:12.1.4 - https://jetty.org/jetty-ee10/jetty-ee10-servlets) + + Apache License, Version 2.0, Eclipse Public License, Version 2.0, Modified BSD, The GNU General Public License (GPL), Version 2, With Classpath Exception + + * jersey-core-server (org.glassfish.jersey.core:jersey-server:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/jersey-server) + + Apache License, Version 2.0, Eclipse Public License, Version 2.0, Public Domain, The GNU General Public License (GPL), Version 2, With Classpath Exception + + * jersey-core-common (org.glassfish.jersey.core:jersey-common:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/jersey-common) + + Apache License, Version 2.0, Eclipse Public License - Version 2.0 + + * Core :: Security (org.eclipse.jetty:jetty-security:12.1.1 - https://jetty.org/jetty-core/jetty-security) + * Jetty :: SetUID JNA (org.eclipse.jetty.toolchain.setuid:jetty-setuid-jna:2.0.3 - https://eclipse.org/jetty/jetty-setuid-jna) + + Apache License, Version 2.0, GNU General Public License, version 2 + + * RocksDB JNI (org.rocksdb:rocksdbjni:10.2.1 - https://rocksdb.org) + + Apache License, Version 2.0, LGPL 2.1, MPL 1.1 + + * Javassist (org.javassist:javassist:3.30.2-GA - https://www.javassist.org/) + + Apache License, Version 2.0, LGPL-2.1-or-later + + * Java Native Access (net.java.dev.jna:jna-jpms:5.14.0 - https://github.com/java-native-access/jna) + + Bouncy Castle Licence + + * Bouncy Castle ASN.1 Extension and Utility APIs (org.bouncycastle:bcutil-jdk18on:1.83 - https://www.bouncycastle.org/download/bouncy-castle-java/) + * Bouncy Castle PKIX, CMS, EAC, TSP, PKCS, OCSP, CMP, and CRMF APIs (org.bouncycastle:bcpkix-jdk18on:1.83 - https://www.bouncycastle.org/download/bouncy-castle-java/) + * Bouncy Castle Provider (org.bouncycastle:bcprov-jdk18on:1.83 - https://www.bouncycastle.org/download/bouncy-castle-java/) + + BSD-2-Clause, Public Domain, per Creative Commons CC0 + + * HdrHistogram (org.hdrhistogram:HdrHistogram:2.2.2 - http://hdrhistogram.github.io/HdrHistogram/) + + BSD 2-Clause License + + * zstd-jni (com.github.luben:zstd-jni:1.5.6-10 - https://github.com/luben/zstd-jni) + + BSD 3-Clause License + + * asm (org.ow2.asm:asm:9.9 - http://asm.ow2.io/) + * asm-commons (org.ow2.asm:asm-commons:9.9 - http://asm.ow2.io/) + * asm-tree (org.ow2.asm:asm-tree:9.9 - http://asm.ow2.io/) + * dnsjava (dnsjava:dnsjava:3.6.1 - https://github.com/dnsjava/dnsjava) + * Protocol Buffer Java API (com.google.protobuf:protobuf-java:2.5.0 - http://code.google.com/p/protobuf) + + BSD License + + * JLine (jline:jline:2.14.6 - http://nexus.sonatype.org/oss-repository-hosting.html/jline) + * Stax2 API (org.codehaus.woodstox:stax2-api:4.2.1 - http://github.com/FasterXML/stax2-api) + + CDDL/GPLv2+CE + + * JavaBeans Activation Framework API jar (javax.activation:javax.activation-api:1.2.0 - http://java.net/all/javax.activation-api/) + + Common Development and Distribution License + + * Java 
Servlet API (javax.servlet:javax.servlet-api:3.1.0 - http://servlet-spec.java.net) + * javax.annotation API (javax.annotation:javax.annotation-api:1.3.2 - http://jcp.org/en/jsr/detail?id=250) + + Common Development and Distribution License (CDDL) v1.0 + + * JSR-250 Common Annotations for the JavaTM Platform (javax.annotation:jsr250-api:1.0 - http://jcp.org/aboutJava/communityprocess/final/jsr250/index.html) + + Common Development and Distribution License (CDDL) v1.1, The GNU General Public License (GPL), Version 2, With Classpath Exception + + * jaxb-api (javax.xml.bind:jaxb-api:2.3.1 - https://github.com/javaee/jaxb-spec/jaxb-api) + * JAXB RI (com.sun.xml.bind:jaxb-impl:2.2.3-1 - http://jaxb.java.net/) + * jersey-json (com.github.pjfanning:jersey-json:1.22.0 - https://github.com/pjfanning/jersey-1.x) + * jersey-servlet (com.sun.jersey:jersey-servlet:1.19.4 - https://jersey.java.net/jersey-servlet/) + + Eclipse Distribution License, Version 1.0 + + * Jakarta XML Binding API (jakarta.xml.bind:jakarta.xml.bind-api:4.0.4 - https://github.com/jakartaee/jaxb-api/jakarta.xml.bind-api) + * JavaBeans Activation Framework (com.sun.activation:jakarta.activation:1.2.1 - https://github.com/eclipse-ee4j/jaf/jakarta.activation) + * JavaBeans Activation Framework API jar (jakarta.activation:jakarta.activation-api:1.2.1 - https://github.com/eclipse-ee4j/jaf/jakarta.activation-api) + + Eclipse Public License, Version 1.0 + + * clojure (org.clojure:clojure:1.12.3 - http://clojure.org/) + * core.specs.alpha (org.clojure:core.specs.alpha:0.4.74 - https://github.com/clojure/build.poms/core.specs.alpha) + * org.eclipse.sisu.inject (org.eclipse.sisu:org.eclipse.sisu.inject:0.3.4 - http://www.eclipse.org/sisu/org.eclipse.sisu.inject/) + * org.eclipse.sisu.plexus (org.eclipse.sisu:org.eclipse.sisu.plexus:0.3.4 - http://www.eclipse.org/sisu/org.eclipse.sisu.plexus/) + * spec.alpha (org.clojure:spec.alpha:0.5.238 - https://github.com/clojure/build.poms/spec.alpha) + * tools.logging (org.clojure:tools.logging:1.3.0 - https://github.com/clojure/build.poms/tools.logging) + + Eclipse Public License, Version 1.0, GNU Lesser General Public License + + * Logback Access Common Module (ch.qos.logback.access:logback-access-common:2.0.6 - http://logback.qos.ch/logback-access-common) + * Logback Access Jetty 12 Module (ch.qos.logback.access:logback-access-jetty12:2.0.6 - http://logback.qos.ch/logback-access-jetty12) + + Eclipse Public License, Version 2.0 + + * grizzly-framework (org.glassfish.grizzly:grizzly-framework:4.0.2 - https://projects.eclipse.org/projects/ee4j.grizzly/grizzly-framework) + * grizzly-http (org.glassfish.grizzly:grizzly-http:4.0.2 - https://projects.eclipse.org/projects/ee4j.grizzly/grizzly-http) + * grizzly-http-server (org.glassfish.grizzly:grizzly-http-server:4.0.2 - https://projects.eclipse.org/projects/ee4j.grizzly/grizzly-http-server) + * grizzly-http-servlet (org.glassfish.grizzly:grizzly-http-servlet:4.0.2 - https://projects.eclipse.org/projects/ee4j.grizzly/grizzly-http-servlet) + * org.eclipse.sisu:org.eclipse.sisu.inject (org.eclipse.sisu:org.eclipse.sisu.inject:0.9.0.M4 - https://eclipse.dev/sisu/org.eclipse.sisu.inject/) + + Eclipse Public License, Version 2.0, GPL-2.0-with-classpath-exception + + * Jakarta RESTful WS API (jakarta.ws.rs:jakarta.ws.rs-api:3.1.0 - https://github.com/eclipse-ee4j/jaxrs-api) + + Eclipse Public License, Version 2.0, The GNU General Public License (GPL), Version 2, With Classpath Exception + + * aopalliance version 1.0 repackaged as a module 
(org.glassfish.hk2.external:aopalliance-repackaged:3.0.6 - https://github.com/eclipse-ee4j/glassfish-hk2/external/aopalliance-repackaged) + * HK2 API module (org.glassfish.hk2:hk2-api:3.0.6 - https://github.com/eclipse-ee4j/glassfish-hk2/hk2-api) + * HK2 Implementation Utilities (org.glassfish.hk2:hk2-utils:3.0.6 - https://github.com/eclipse-ee4j/glassfish-hk2/hk2-utils) + * Jakarta Annotations API (jakarta.annotation:jakarta.annotation-api:2.1.1 - https://projects.eclipse.org/projects/ee4j.ca) + * Jakarta Servlet (jakarta.servlet:jakarta.servlet-api:6.1.0 - https://projects.eclipse.org/projects/ee4j.servlet) + * OSGi resource locator (org.glassfish.hk2:osgi-resource-locator:1.0.3 - https://projects.eclipse.org/projects/ee4j/osgi-resource-locator) + * ServiceLocator Default Implementation (org.glassfish.hk2:hk2-locator:3.0.6 - https://github.com/eclipse-ee4j/glassfish-hk2/hk2-locator) + + Eclipse Public License (EPL) 1.0, GNU Lesser General Public License Version 2.1, February 1999 + + * JGraphT - Core (org.jgrapht:jgrapht-core:0.9.0 - http://www.jgrapht.org/jgrapht-core) + + Eclipse Public License 2.0, GNU General Public License, version 2 with the GNU Classpath Exception + + * Jakarta Messaging API (jakarta.jms:jakarta.jms-api:3.1.0 - https://projects.eclipse.org/projects/ee4j.jms) + + Eclipse Public License v. 2.0, GNU General Public License, version 2 with the GNU Classpath Exception + + * Eclipse Expressly (org.glassfish.expressly:expressly:5.0.0 - https://projects.eclipse.org/projects/ee4j.expressly) + * Jakarta Expression Language API (jakarta.el:jakarta.el-api:5.0.1 - https://projects.eclipse.org/projects/ee4j.el) + + MIT License + + * argparse4j (net.sourceforge.argparse4j:argparse4j:0.9.0 - https://argparse4j.github.io) + * JCodings (org.jruby.jcodings:jcodings:1.0.58 - http://nexus.sonatype.org/oss-repository-hosting.html/jcodings) + * Jedis (redis.clients:jedis:7.1.0 - https://github.com/redis/jedis) + * Joni (org.jruby.joni:joni:2.2.1 - http://nexus.sonatype.org/oss-repository-hosting.html/joni) + * JUL to SLF4J bridge (org.slf4j:jul-to-slf4j:2.0.17 - http://www.slf4j.org) + * redis-authx-core (redis.clients.authentication:redis-authx-core:0.1.1-beta2 - https://github.com/redis/redis-authx-core) + * SLF4J API Module (org.slf4j:slf4j-api:2.0.17 - http://www.slf4j.org) + * System Out and Err redirected to SLF4J (uk.org.lidalia:sysout-over-slf4j:1.0.2 - http://projects.lidalia.org.uk/sysout-over-slf4j/) + + Public Domain + + * AOP alliance (aopalliance:aopalliance:1.0 - http://aopalliance.sourceforge.net) + * JSON in Java (org.json:json:20250517 - https://github.com/douglascrockford/JSON-java) + + Revised BSD + + * JSch (com.jcraft:jsch:0.1.55 - http://www.jcraft.com/jsch/) + + Similar to Apache License but with the acknowledgment clause removed + + * JDOM (org.jdom:jdom2:2.0.6.1 - http://www.jdom.org) + + The BSD 2-Clause License + + * Stax2 API (org.codehaus.woodstox:stax2-api:4.2.2 - http://github.com/FasterXML/stax2-api) + + The Go license + + * re2j (com.google.re2j:re2j:1.1 - http://github.com/google/re2j) diff --git a/DEVELOPER.md b/DEVELOPER.md index 0c8de007d97..007cefab9bc 100644 --- a/DEVELOPER.md +++ b/DEVELOPER.md @@ -19,6 +19,9 @@ Table of Contents * Merge a pull request or patch * Build the code and run the tests * Create a Storm distribution (packaging) +* Best practices + * Testing + * Version Changes * Tools * Source code repositories (git) * Issue tracking (JIRA) @@ -72,7 +75,7 @@ and summarize the desired functionality. 
Set the form field "Issue type" to "New Feature" and summarize the desired functionality. If you have not used the issue tracker before you will need to register an account (free), log in, and then click on the blue "Create Issue" button in the top navigation bar. -You can also opt to send a message to the [Storm Users mailing list](http://storm.incubator.apache.org/community.html). +You can also opt to send a message to the [Storm Users mailing list](http://storm.apache.org/community.html). @@ -81,17 +84,25 @@ You can also opt to send a message to the [Storm Users mailing list](http://stor Before you set out to contribute code we recommend that you familiarize yourself with the Storm codebase, notably by reading through the -[Implementation documentation](http://storm.incubator.apache.org/documentation/Implementation-docs.html). +[Implementation documentation](http://storm.apache.org/documentation/Implementation-docs.html). _If you are interested in contributing code to Storm but do not know where to begin:_ In this case you should [browse our issue tracker for open issues and tasks](https://issues.apache.org/jira/browse/STORM/?selectedTab=com.atlassian.jira.jira-projects-plugin:issues-panel). You may want to start with beginner-friendly, easier issues -([newbie issues](https://issues.apache.org/jira/browse/STORM-58?jql=project%20%3D%20STORM%20AND%20labels%20%3D%20newbie%20AND%20status%20%3D%20Open) +([newbie issues](https://issues.apache.org/jira/issues/?jql=project%20%3D%20STORM%20AND%20status%20%3D%20Open%20AND%20labels%20%3D%20newbie) and [trivial issues](https://issues.apache.org/jira/secure/IssueNavigator.jspa?reset=true&jqlQuery=project+%3D+STORM+AND+resolution+%3D+Unresolved+AND+priority+%3D+Trivial+ORDER+BY+key+DESC&mode=hide)) because they require learning about only an isolated portion of the codebase and are a relatively small amount of work. +Please use idiomatic Clojure style, as explained in [this Clojure style guide][clj-SG]. Another useful reference is +the [Clojure Library Coding Standards][clj-LCS]. Perhaps the most important practice is consistently writing a clear docstring +for functions, explaining the return value and arguments. As of this writing, the Storm codebase would benefit from +various style improvements. + +[clj-SG]: https://github.com/bbatsov/clojure-style-guide +[clj-LCS]: http://dev.clojure.org/display/community/Library+Coding+Standards + Contributions to the Storm codebase should be sent as GitHub pull requests. See section _Create a pull request_ below for details. If there is any problem with the pull request we can iterate on it using the commenting features of GitHub. @@ -108,14 +119,35 @@ GitHub. 3. Storm committers will iterate with you on the design to make sure you are on the right track. 4. Implement your issue, create a pull request (see below), and iterate from there. +### Testing + +Unit tests and integration tests are an essential part of code contributions. + +To mark a Java test as a Java integration test, add the annotation `@IntegrationTest` to the test class definition or test method. Make sure the test is a JUnit 5 test. Java integration tests can be in the same package as Java unit tests. + +```java + @IntegrationTest + public class MyIntegrationTest { + ...
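+        // JUnit 5 test methods (annotated with @Test) go here; annotating the
+        // class, as above, marks all of its tests as integration tests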
+ } +``` + +To mark a Clojure test as a Clojure integration test, the test source must be located in a package whose name is prefixed with `integration.` + +For example, the test `test/clj/org.apache.storm.drpc_test.clj` is considered a Clojure unit test, whereas + `test/clj/integration.org.apache.storm.drpc_test.clj` is considered a Clojure integration test. + +Please refer to the section _Build the code and run the tests_ below for how to run integration tests and for details on the build phase in which each kind of test runs. ## Contribute documentation -Documentation contributions are very welcome! The best way to send contributions is as emails through the -[Storm Developers](http://storm.incubator.apache.org/community.html) mailing list. +Documentation contributions are very welcome! +You can contribute documentation by pull request, using the same process as for code contributions. +Release-specific documentation can be found in `docs/` in this repository. +Documentation not specific to a release, e.g. announcements, is found in the [storm-site](https://github.com/apache/storm-site) repository. If you'd like to build or test the website, refer to the README.md in the storm-site repository. @@ -127,13 +159,13 @@ Documentation contributions are very welcome! The best way to send contribution ### Create a pull request Pull requests should be done against the read-only git repository at -[https://github.com/apache/incubator-storm](https://github.com/apache/incubator-storm). +[https://github.com/apache/storm](https://github.com/apache/storm). Take a look at [Creating a pull request](https://help.github.com/articles/creating-a-pull-request). In a nutshell you need to: 1. [Fork](https://help.github.com/articles/fork-a-repo) the Storm GitHub repository at - [https://github.com/apache/incubator-storm/](https://github.com/apache/incubator-storm/) to your personal GitHub + [https://github.com/apache/storm/](https://github.com/apache/storm/) to your personal GitHub account. See [Fork a repo](https://help.github.com/articles/fork-a-repo) for detailed instructions. 2. Commit any changes to your fork. 3. Send a [pull request](https://help.github.com/articles/creating-a-pull-request) to the Storm GitHub repository @@ -142,19 +174,16 @@ need to: ticket number (e.g. `STORM-123: ...`). You may want to read [Syncing a fork](https://help.github.com/articles/syncing-a-fork) for instructions on how to keep -your fork up to date with the latest changes of the upstream (official) `incubator-storm` repository. +your fork up to date with the latest changes of the upstream (official) `storm` repository. ### Approve a pull request -_NOTE: The information in this section may need to be formalized via proper project bylaws._ - -Pull requests are approved with two +1s from committers and need to be up for at least 72 hours for all committers to -have a chance to comment. In case it was a committer who sent the pull request than two _different_ committers must +1 -the request. +The [BYLAWS](http://storm.apache.org/contribute/BYLAWS.html) describe the conditions of approval for code and non-code changes. +Please refer to the Approvals -> Actions section there for more details. @@ -164,12 +193,15 @@ _This section applies to committers only._ **Important: A pull request must first be properly approved before you are allowed to merge it.** -Committers that are integrating patches or pull requests should use the official Apache repository at -[https://git-wip-us.apache.org/repos/asf/incubator-storm.git](https://git-wip-us.apache.org/repos/asf/incubator-storm.git).
+#### Via GitHub + +You can use the [Gitbox account linking utility](https://gitbox.apache.org/setup/) to link your Apache and GitHub accounts. This will allow you to merge pull requests using GitHub's UI. + +#### Via your terminal To pull in a merge request you should generally follow the command line instructions sent out by GitHub. -1. Go to your local copy of the [Apache git repo](https://git-wip-us.apache.org/repos/asf/incubator-storm.git), switch +1. Go to your local copy of the [Apache git repo](https://gitbox.apache.org/repos/asf/storm.git), switch to the `master` branch, and make sure it is up to date. $ git checkout master @@ -184,15 +216,11 @@ To pull in a merge request you should generally follow the command line instruct 3. Merge the pull request into your local test branch. $ git pull + You can use `./dev-tools/storm-merge.py ` to produce the above command most of the time. -4. Assuming that the pull request merges without any conflicts: - Update the top-level `CHANGELOG.md`, and add in the JIRA ticket number (example: `STORM-1234`) and ticket - description to the change log. Make sure that you place the JIRA ticket number in the commit comments where - applicable. - -5. Run any sanity tests that you think are needed. +4. Run any sanity tests that you think are needed. -6. Once you are confident that everything is ok, you can merge your local test branch into your local `master` branch, +5. Once you are confident that everything is ok, you can merge your local test branch into your local `master` branch, and push the changes back to the official Apache repo. # Pull request looks ok, change log was updated, etc. We are ready for pushing. @@ -204,7 +232,7 @@ To pull in a merge request you should generally follow the command line instruct # automatically a short while after you have pushed to the Apache repo. $ git push origin master -7. The last step is updating the corresponding JIRA ticket. [Go to JIRA](https://issues.apache.org/jira/browse/STORM) +6. The last step is updating the corresponding JIRA ticket. [Go to JIRA](https://issues.apache.org/jira/browse/STORM) and resolve the ticket. Be sure to set the `Fix Version/s` field to the version you pushed your changes to. It is usually good practice to thank the author of the pull request for their contribution if you have not done so already. @@ -214,14 +242,93 @@ To pull in a merge request you should generally follow the command line instruct # Build the code and run the tests +## Prerequisites + +In order to build `storm` you need `python3`, `ruby` and `nodejs`. To keep this page concise we don't provide platform/OS-specific installation instructions for those here. Please refer to your platform's/OS' documentation for support. + +The `ruby` version manager `rvm` and `nodejs` version manager `nvm` are for convenience and are used in the tests which run on [GitHub actions](https://github.com/apache/storm/actions). They can be installed using `curl -L https://get.rvm.io | bash -s stable --autolibs=enabled && source ~/.profile` (see the [rvm installation instructions](https://github.com/rvm/rvm) for details) and `wget -qO- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash && source ~/.bashrc` (see the [nvm installation instructions](https://github.com/nvm-sh/nvm) for details). + +With `rvm` and `nvm` installed you can run + +```sh +rvm use 2.7 --install +nvm install 16 +nvm use 16 +``` + +in order to get started as fast as possible.
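+
+You can check that the expected interpreter versions are active (the exact patch versions will vary by machine) with:
+
+```sh
+ruby -v           # should report a 2.7.x version
+node -v           # should report a v16.x version
+python3 --version
+```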
Users can still install a specific version of `ruby` and/or `node` manually. + +You will also need the [mock](https://docs.python.org/3/library/unittest.mock.html) +Python testing library, as well as [Python 3.x](https://github.com/pyenv/pyenv) itself +(note: Python 2.7.x is **not** supported). +With [pip3](https://pip.pypa.io/en/stable/installation/) installed you can run +``` +pip3 install mock +``` + +## Building + The following commands must be run from the top-level directory. - # Build the code and run the tests - $ mvn clean install +`mvn clean install` + +If you wish to skip the unit tests you can do this by adding `-DskipTests` to the command line. + +If you wish to skip the examples and external modules, you can do this by adding `-P '!examples,!externals'` to the command line. + +If you modified `storm.thrift`, you have to regenerate the Java and Python Thrift code before compiling the whole project. + +```sh +cd storm-client/src +sh genthrift.sh +``` + +## Testing + +Tests are separated into two groups: unit tests and integration tests. Java unit tests, Clojure unit tests, and Clojure integration tests (for reasons inherent to the clojure-maven-plugin) run in the Maven `test` phase. Java integration tests run in the Maven `integration-test` or `verify` phases. + +To run Clojure and Java unit tests but no integration tests execute the command + + mvn test + +Integration tests require that you activate the profile `integration-tests-only` or `all-tests` and that you specify the `maven-failsafe-plugin` in the module pom file. + +To run all Java and Clojure integration tests but no unit tests execute one of the commands + + mvn -P integration-tests-only,examples,externals verify + mvn -P integration-tests-only,examples,externals integration-test - # Build the code but skip the tests - $ mvn clean install -DskipTests=true +To run all unit tests plus Clojure integration tests but no Java integration tests execute the command + + mvn -P all-tests,examples,externals test + +To run all unit tests and all integration tests execute one of the commands + + mvn -P all-tests,examples,externals verify + mvn -P all-tests,examples,externals integration-test + + +You can also run tests selectively with `-Dtest=`. This works for both Clojure and JUnit tests. + +Unfortunately you might experience failures in Clojure tests, which are wrapped by the `clojure-maven-plugin` and thus don't provide much useful output at first sight - you might end up with a Maven test failure with an error message as unhelpful as `Clojure failed.`. In this case it's recommended to look into `target/test-reports` of the failed project to see which tests actually failed, or to scroll through the Maven output looking for obvious issues like missing binaries. + +By default, integration tests are not run in the test phase. To run Java and Clojure integration tests you must enable the profile `integration-tests-only`, or `all-tests`. + +## Listing dependency licenses + +You can generate a list of dependencies and their licenses by running `mvn license:aggregate-add-third-party@generate-and-check-licenses -Dlicense.skipAggregateAddThirdParty=false` in the project root. +The list will be put in DEPENDENCY_LICENSES. + +The license aggregation plugin will use the license listed in a dependency's POM. If the license is missing, or incomplete (e.g. due to multiple licenses), you can override the license by describing the dependency in the THIRD-PARTY.properties file in the project root.
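+
+For example, a dependency whose POM carries no usable license information could be pinned with an entry like the following (a hypothetical artifact, assuming the license plugin's `groupId--artifactId--version` key format):
+
+```
+com.example--some-library--1.2.3=Apache License, Version 2.0
+```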
+ +## Auditing licenses for LICENSE/NOTICE +The LICENSE and NOTICE files contain licenses and notices for source distribution content. The LICENSE-binary and NOTICE-binary apply to the binary distributions. + +When auditing the binary LICENSE-binary and NOTICE-binary, there are a couple of helper scripts available in dev-tools. `collect_license_files` can create an aggregate NOTICE from the libraries in an extracted distribution. The aggregate NOTICE should be adjusted to remove Storm notices and duplicates, and added to the NOTICE-binary. + +The `dev-tools/validate-license-files.py` script will check that LICENSE-binary and DEPENDENCY_LICENSES are up to date. Regenerating DEPENDENCY_LICENSES simply requires rerunning the license plugin (see above). LICENSE-binary must be updated manually. The script will check that the dependencies included in a storm-dist/binary build are present in LICENSE-binary, and that no other dependencies are listed. Any additional or missing dependencies are printed to the console, and can be added to LICENSE-binary manually. There will likely be an entry for them in `DEPENDENCY_LICENSES` that can be copy-pasted to LICENSE-binary. + +You can download the dependency licenses by running `mvn package -Dlicense.skipAggregateDownloadLicenses=false -DskipTests` in the project root. This will put the licenses in target/generated-resources. Keep an eye on the Maven output, as some dependencies may not have licenses configured correctly. These will have to be downloaded manually. @@ -231,7 +338,7 @@ You can create a _distribution_ (like what you can download from Apache) as foll do not use the Maven release plugin because creating an official release is the task of our release manager. # First, build the code. - $ mvn clean install # you may skip tests with `-DskipTests=true` to save time + $ mvn clean install # you may skip tests with `-DskipTests=true` to save time # Create the binary distribution. $ cd storm-dist/binary && mvn package @@ -254,6 +361,39 @@ You can verify whether the digital signatures match their corresponding files: $ gpg --verify storm-dist/binary/target/apache-storm-.tar.gz.asc + + +# Best practices + + + + +## Testing + +Tests should never rely on timing in order to pass. Storm can properly test functionality that depends on time by +simulating time, which means we do not have to worry about e.g. random delays failing our tests non-deterministically. + +If you are testing topologies that do not do full tuple acking, then you should be testing using the "tracked +topologies" utilities in `org.apache.storm.testing.clj`. For example, +[test-acking](storm-core/test/clj/org/apache/storm/integration_test.clj) (around line 213) tests the acking system in +Storm using tracked topologies. Here, the key is the `tracked-wait` function: it will only return when both the given number of +tuples have been emitted by the spouts _and_ the topology is idle (i.e. no tuples have been emitted nor will be emitted +without further input). Note that you should not use tracked topologies for topologies that have tick tuples. + + + +## Version Changes + +An easy way to change versions across all pom files, for example from `1.0.0-SNAPSHOT` to `1.0.0`, is with the Maven +versions plugin. + +``` +mvn versions:set # This prompts for a new version +mvn versions:commit +``` + +[Plugin Documentation](http://www.mojohaus.org/versions-maven-plugin/) + # Tools @@ -267,45 +407,36 @@ The source code of Storm is managed via [git](http://git-scm.com/). For a number of reasons there is more than one git repository associated with Storm.
* **Committers only:** - [https://git-wip-us.apache.org/repos/asf/incubator-storm.git](https://git-wip-us.apache.org/repos/asf/incubator-storm.git) + [https://gitbox.apache.org/repos/asf/storm.git](https://gitbox.apache.org/repos/asf/storm.git) is the official and authoritative git repository for Storm, managed under the umbrella of the Apache Software Foundation. Only official Storm committers will interact with this repository. When you push the first time to this repository git will prompt you for your username and password. Use your Apache user ID and password, i.e. the credentials you configured via [https://id.apache.org/](https://id.apache.org/) after you were [onboarded as a committer](http://www.apache.org/dev/new-committers-guide.html#account-creation). * **Everybody else:** - [https://github.com/apache/incubator-storm/](https://github.com/apache/incubator-storm/) is a read-only mirror of the - official git repository. If you are not a Storm committer (most people) this is the repository you should work - against. See _Development workflow_ above on how you can create a pull request, for instance. - -An automated bot (called _[ASF GitHub Bot](https://issues.apache.org/jira/secure/ViewProfile.jspa?name=githubbot)_ in -[Storm JIRA](https://issues.apache.org/jira/browse/STORM)) runs periodically to merge changes in the -[official Apache repo](https://git-wip-us.apache.org/repos/asf/incubator-storm.git) to the read-only -[GitHub mirror repository](https://github.com/apache/incubator-storm/), and to merge comments in GitHub pull requests to -the [Storm JIRA](https://issues.apache.org/jira/browse/STORM). - + [https://github.com/apache/storm/](https://github.com/apache/storm/) is a mirror of the + ASF git repository. If you are not a Storm committer (most people) this is the repository you should work + against. See _Development workflow_ above on how you can create a pull request, for instance. ## Issue tracking (JIRA) Issue tracking includes tasks such as reporting bugs, requesting and collaborating on new features, and administrative -activities for release management. As an Apache software project we use JIRA as our issue tracking tool. +activities for release management. As an Apache software project we use JIRA as our issue tracking tool. The Storm JIRA is available at: * [https://issues.apache.org/jira/browse/STORM](https://issues.apache.org/jira/browse/STORM) - If you do not have a JIRA account yet, then you can create one via the link above (registration is free). - # Questions? If you have any questions after reading this document, then please reach out to us via the -[Storm Developers](http://storm.incubator.apache.org/community.html) mailing list. +[Storm Developers](http://storm.apache.org/community.html) mailing list. And of course we also welcome any contributions to improve the information in this document! diff --git a/DISCLAIMER b/DISCLAIMER deleted file mode 100644 index 8638904d9db..00000000000 --- a/DISCLAIMER +++ /dev/null @@ -1,10 +0,0 @@ -Apache Storm is an effort undergoing incubation at the Apache Software -Foundation (ASF), sponsored by the Apache Incubator PMC. - -Incubation is required of all newly accepted projects until a further review -indicates that the infrastructure, communications, and decision making process -have stabilized in a manner consistent with other successful ASF projects. 
- -While incubation status is not necessarily a reflection of the completeness -or stability of the code, it does indicate that the project has yet to be -fully endorsed by the ASF. diff --git a/KEYS b/KEYS index d3bf1da836f..4b90e4df637 100644 --- a/KEYS +++ b/KEYS @@ -79,3 +79,82 @@ Be/J4vDCRO3I+6qUpQwfNaUzjcHBaStzlV35mu/6Xeq7Kkr5VVmqqwT53Xig1laL Vw== =E8Vm -----END PGP PUBLIC KEY BLOCK----- +pub rsa2048 2016-01-21 [SC] + 3D3E9ACA18C5C9FE181A504B37D6756C2F471B9E +uid [ultimate] Jungtaek Lim (HeartSaVioR) +sig 3 37D6756C2F471B9E 2018-02-26 Jungtaek Lim (HeartSaVioR) +uid [ultimate] Jungtaek Lim (HeartSaVioR) +sig 3 37D6756C2F471B9E 2016-01-21 Jungtaek Lim (HeartSaVioR) +sig 950B0EE138256E78 2018-02-27 Gwan-gyeong Mun +sig BBE44E923A970AB7 2018-07-25 Francis Chuang +sig DDB6E9812AD3FAE3 2018-07-25 Julian Hyde (CODE SIGNING KEY) +sub rsa2048 2016-01-21 [E] +sig 37D6756C2F471B9E 2016-01-21 Jungtaek Lim (HeartSaVioR) + +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBFagQBgBCADVBeBdgam+O2IqAjf30flo0Yhm9Of4BgJMijbuypqLWT6xSrZh +Z4ZB6/FFbLyhkdKcmkzc+G29aQOYUG6jF94g69ZG4k8Zw+lLSA0r52lvS7zzvsfB ++hFi2w/R57+v7AyHSbAmEJqd5PGpmyyexLMFkcGWZFyyJgrJBQXOBmWpNKEJFIIw +wWla1SWbcEtfaPkTb1VzsUiDcw/hz9HMkgAx2zGaXnGE98ngXNi+RJ7BHEw0r1HD +e/BpmYbAgI2tQmCkC/Q+raeKPsVngho0aAnl5Vb3mUw0Ft+cyAjrdgcEpLYYGHNX +YUoXgfXZ3wMJeA939i591pdAY+JO9//nChBfABEBAAG0Lkp1bmd0YWVrIExpbSAo +SGVhcnRTYVZpb1IpIDxrYWJod2FuQGdtYWlsLmNvbT6JATgEEwECACIFAlagQBgC +GwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEDfWdWwvRxueDqsH/Awcpuud +9bIpwZgfn3rpJyPjZVfIFY6ELYFG+mTXDFum5Lau9F0YRIseu8A+COuKYnC1FZtG +Q/C1rKePddmSV3aeOiorJtfabYAe9yGY+5jy6Fq3fQsJ2FpYRrJjR8xfDLGf38T6 +E9H/FotSFr7lnE2zSkhx/mYk4KhAWQOPls9W8yNTAENtg8vkybbA8egutUbwT5ic +c6I5QsBA3+rSgCnJ3wb82wDEpLHmF4OZzs8/uhbqmIFtzE6TeFQ6UtPH/CEX1Cvk +SKhRz86PzVxWkt4jqthjxHihUKlfw3nfr4QRs4HN0+1RWHNVyzEmyXFSpo4LwmFI +n7DvYxWN3Y6FZ42JATMEEAEIAB0WIQT5+o44yp7gqj5QWr6VCw7hOCVueAUCWpTe +pAAKCRCVCw7hOCVueO1sCACeujHcQjV6uDcU3UZQ+7+54FnS9y7LEdJ9CYv5PkiM +ZWlCjHE/3lVAO5Vl9VoQSxsadoZyNEFpBTiQnrapMJjeYa0fW0525RMD23oqUk+t +ULMlY2bflCSzrRGjstLrtFMgNxzfDWJN0EYzS36sxiniI6S3gQdOkbG4jLe9jZSP +0K8OEF6k9uZm8soRaUVgVH4D+76ktNSPP2wFF6bfglbVpqom1wqoRcDQVDovSBcJ +Bra+ls6C7YAiEuH5sa3j8oEc7l6DB37X7z2mQMP1+SzrkSQAasmh2X+x+RUzittJ +Fc1oksyA4AAt0zE++ZlN5XLp/bSO+o4BIDqQwIv8pxX2iQIcBBABCgAGBQJbWQTj +AAoJELvkTpI6lwq3/BQP/RR7oQhAT7yyBv1pD7IqSvtWEkodmCm5unlf1vCvFJSC +GOFpkQbkkg4d1evgwdkCUfhE9+YopphAT4lBVLdAQWHpOuDYv9Lt6MeTH++0Uopn ++JRu8ZgVnpRg/CZmSBbucMjTvSRAT2y9BGePNh1SemC5FUP2+e1zK18m/ERio3J4 +lIjDDmgEFfqkzGFONLH30pQHDxEFSCZzpndG021xloPPm79T/GYm/GBoX0PoNxW7 +4VA+ugzkCBenJQcBJ0PWTus43Zlog7I86uJH02a40W/Q2G3moL+sNXiuRnp0ch8m +Mi7aHl6D9z4SFlTrgvdpIwc6riBdzWumkd0JlUadO5HCTZ+o/4xYVBikGqadI9OJ +twhrHBu/rcuyaAbA0ncmt+G7+WqxSvC2GMoEKsOwUTCz/gTqF6K4uOdk24RJ9pzY +xqYeyrKlCm95O1+xsb74av2S7Z7xaB7QFNpWb0YWsfy0ERNoHZWsEdfxI/q6aFSG +Ta52s8XtmCSkScjVIXBFjkX/EC3MxATC12yXGspOI10ZIhwuIt6BKL9OlKVgBN7n +PkfRLaqnn80VNutwDkRhiNaSe99pkemFMYMUeAEG59V5NPZHbA8aLW81KY0sfvlj +StZPWd8Pqw0ase/86paHHX0jLNy89ibT72HlH4904kZ7a0pOjvc4/nfQNFVDG51B +iQIzBBABCgAdFiEE3TFPXhdzfXalVNzo3bbpgSrT+uMFAltZBEcACgkQ3bbpgSrT ++uNjmg//XcMDyrtHdQChOCLE1X4bZEkCI6ZzuKogORAHFuueoDQXwhOnMzxfaIWt +xzBtO25RqA8Fe0PNz3t2W7e3OlViyOpPvidQu80sJz6Rv4fkY0ZQ+aejTYfakGgQ +gP7apdK8O14WZXYzrHGtJVKK+7x2uSBYTeTUKjRs8JMYQgqklSwgzz91lFlikXGp +FKgeeKCXacm5ZXrwzAAahVqAJsWqGVO0XSGjOx7jrZrSgXntvoAHvkZ3R67r6FcB +oyFllntlgI3nq9PVir8lWbY1C8GjtuzvcC1hUvY68Ct4Uk6IdCnQtvsDXcrT7Jy6 +8J/1Pl8Mi9gPxng94g2oyquLvxjWnjNXwdYMCAjiQAyB7RtUXI+AcJS6Fcr81vpB +WfOha3xK6zTwNxTGfbV1X1SqEEV/GTh6KxBIlEwTTuleiCI+CsYUElxjyuNNXc7c 
+/gai8FqVi9xAx2czLPld+FSiia65w6WHymd1E97gunW2LXQECpzoxhL21x7854fi +PB25R8/PAmvolENoKjHM9UYbEitrzM7Bom/6IAkQu7sS5opJvO1CTgQoyLGwUYp0 +5l23gIzp5LqU3ZSLvKAPwnlBlPOmQYX3EF+pw2cYAGjHpjWz7JV4pYUPiSSKrNVb +6Xjg4lCsum/mxr+fD2KK3yYASY/t7780nXGvWMlwy0sqr5l2kHS0L0p1bmd0YWVr +IExpbSAoSGVhcnRTYVZpb1IpIDxrYWJod2FuQGFwYWNoZS5vcmc+iQFOBBMBCAA4 +FiEEPT6ayhjFyf4YGlBLN9Z1bC9HG54FAlqTyMMCGwMFCwkIBwIGFQgJCgsCBBYC +AwECHgECF4AACgkQN9Z1bC9HG57sJwf8CFMnhpITcpQLWf8g1WlRUMx84uZJvssj +3RrzXYm/I0/czumcq9yuauKMMipVxTYGIwampH6SIp6Spa3+WUOqKVrIiElkBI9Y +ERvd10Q3i+fPPdusp30jU25UNYOD3MKranrzr3w6aMmBKa6NP43sy+m8s3o5kTvf +kQyBckMpvo4knTUzWnGff1JDS7qwvR/ovezy4CWbIh3svSAVKsHUAqGIaNATqQcc +jBl0hmlLI6iGhtxCgW7IYKnd6Uc5Orliu6fFHZcB+RedC+SIvr8VR2XJGgA0PPqI +dspnW3AGbRHaxwPI6sH/1fZVZRgxJj150mYehMOrx2dz93HJeVERqrkBDQRWoEAY +AQgA7DhNF9UdO8IgWB649AwQY4maATqfAAJ4EvexSspXKZf+ceRdJgX2RvaDSxFS +HSvSOrA3m1e/VFB86fjzdH1V7V3+oFIEkRDs0M3MkREphmYA/HcEq2xRUPoSvODo +IOP9s/BRjxpTGZhAYT28aM9BqDn+wg+jBrDTG1wwveqi7Wu+zDjzP8FDH6JDGjmk +IGTi51sS/XyU+K/KHbCYlsBrTWNVXuVa/2y8pCaFRa1TYWclIECdEkZWYG4RGJ2L +eoZFgRXIUF8MWw9HR/d2uGpuD86M3WnJgIrOsJvAA1pjLZGfkB5NweGPS4U9P/kZ +wvHFSKsrqTpp7YlsRjVwiD2HwwARAQABiQEfBBgBAgAJBQJWoEAYAhsMAAoJEDfW +dWwvRxue+rMH+wXqAhcJsiNGPNEH2DpGZU8F85u3xQGIwFgENERkT0JIxgg/1si3 +GDmvnodf05LF0V+uXK3ZpLnedWz3+VOpDx64Mff2mLhUzb1ReDx7f7NFUE0ThXMZ +ZM+Ga6Pvoiz2xCQH8OWDK4sT7f5uyEP4ihduOX1AwJOhgikpQv77Hrnsy5XgwXYm +NcLNCq3vx22UnTPhSKGhh/2tbKCVOPzYclyosn/AmCooQoCfJn2SxBkxBGbilx21 +IjW6BromTVpWD1vbZ51xxnKfxAHcSF9YKM4rPwvQFag2VfGgwixy3blQ5rvtY/1k +3YTWSJjeOl6hz/ph0bTZgmP40iaQ7qNiFbY= +=S40+ +-----END PGP PUBLIC KEY BLOCK----- diff --git a/LICENSE b/LICENSE index 5e54a988814..fc3f58c3441 100644 --- a/LICENSE +++ b/LICENSE @@ -203,10 +203,11 @@ ----------------------------------------------------------------------- +For jQuery 3.5.1 (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/jquery-3.5.1.min.js) -For jQuery 1.6.2 (storm-core/src/ui/public/js/jquery-1.6.2.min.js) +MIT license selected: -Copyright (c) 2009 John Resig, http://jquery.com/ +Copyright OpenJS Foundation and other contributors, https://openjsf.org/ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the @@ -229,7 +230,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ----------------------------------------------------------------------- -For jQuery Cookies 2.2.0 (storm-core/src/ui/public/js/jquery.cookies.2.2.0.min.js) +For jQuery Cookies 2.2.0 (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/jquery.cookies.2.2.0.min.js) Copyright (c) 2005 - 2010, James Auldridge @@ -254,17 +255,258 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
----------------------------------------------------------------------- -For jQuery TableSorter 2.0.5b (storm-core/src/ui/public/js/jquery.tablesorter.min.js) +For typeahead jquery 0.10.5 (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/typeahead.jquery.min.js) + +Copyright (c) 2013-2014 Twitter, Inc -Copyright (c) 2007 Christian Bach -Examples and docs at: http://tablesorter.com -Dual licensed under the MIT and GPL licenses: -http://www.opensource.org/licenses/mit-license.php -http://www.gnu.org/licenses/gpl.html +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +----------------------------------------------------------------------- + +For js-yaml.min.js (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/js-yaml.min.js) + +(The MIT License) + +Copyright (C) 2011-2015 by Vitaly Puzrin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +----------------------------------------------------------------------- + +For dagre.min.js (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/dagre.min.js) + +Copyright (c) 2012-2014 Chris Pettitt + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +----------------------------------------------------------------------- + +For cytoscape.min.js and cytoscape-dagre.js (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/cytoscape.min.js and storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/cytoscape-dagre.js) + +Copyright (c) 2016 The Cytoscape Consortium + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +----------------------------------------------------------------------- + +For esprima.js (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/esprima.min.js) + +Copyright JS Foundation and other contributors, https://js.foundation/ + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------------------------------------------------------------- + +For mustache.js (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/jquery.mustache.js) + +The MIT License + +Copyright (c) 2009 Chris Wanstrath (Ruby) +Copyright (c) 2010-2014 Jan Lehnardt (JavaScript) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +----------------------------------------------------------------------- + +For moment.js (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/moment.min.js) + +Copyright (c) 2011-2014 Tim Wood, Iskren Chernev, Moment.js contributors + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE.
+ + +----------------------------------------------------------------------- + +For Jquery url plugin (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/url.min.js) + +Jquery Url (A Jquery plugin for URL parser) v1.8.6 +Source repository: https://github.com/websanova/js-url +Licensed under an MIT-style license. See https://github.com/websanova/js-url#license for details. + +----------------------------------------------------------------------- + + +For jquery.blockUI.min.js (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/jquery.blockUI.min.js) + +jQuery BlockUI; v20131009 +http://jquery.malsup.com/block/ +Copyright (c) 2013 M. Alsup; Dual licensed: MIT/GPL + +Copyright © 2007-2013 M. Alsup. MIT license selected: -Copyright (c) 2007 Christian Bach +The BlockUI plugin is dual licensed under the MIT and GPL licenses. + +You may use either license. The MIT license is recommended for most projects +because it is simple and easy to understand and it places almost no +restrictions on what you can do with the plugin. + +If the GPL suits your project better you are also free to use the plugin +under that license. + +You do not have to do anything special to choose one license or the other and +you don't have to notify anyone which license you are using. You are free to +use the BlockUI plugin in commercial projects as long as the copyright header is left intact. + +----------------------------------------------------------------------- + +For jquery dataTables v1.10.4 + +(storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/jquery.dataTables.1.10.4.min.js +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/css/jquery.dataTables.1.10.4.min.css +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/images/{{back, forward}_{disabled, enabled, enabled_hover}}.png +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/images/{sort_{asc, asc_disabled, both, desc, desc_disabled}}.png) + +Copyright (c) 2008-2013 SpryMedia Limited +http://datatables.net + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE.
+ +----------------------------------------------------------------------- + +For jquery dataTables bootstrap integration + +(storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/jquery.dataTables.1.10.4.min.js +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/dataTables.bootstrap.min.js +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/css/dataTables.bootstrap.css) + +Copyright (c) 2013-2014 SpryMedia Limited +http://datatables.net/license +https://github.com/DataTables/Plugins/tree/master/integration/bootstrap/3 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -282,4 +524,115 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. \ No newline at end of file +THE SOFTWARE. + + +----------------------------------------------------------------------- + +For bootstrap v3.3.1 +(storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/bootstrap-3.3.1.min.js +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/css/bootstrap-3.3.1.min.css) + +Bootstrap v3.3.1 (http://getbootstrap.com) +Copyright 2011-2014 Twitter, Inc. +Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + +----------------------------------------------------------------------- + +For jQuery JSONFormatter 1.0.1 2015-02-28 +(storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/jsonFormatter.min.js +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/css/jsonFormatter.min.css) +http://www.jqueryscript.net/other/jQuery-Plugin-For-Pretty-JSON-Formatting-jsonFormatter.html + +The MIT License (MIT) + +Copyright (c) 2015 Matthew Heironimus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +----------------------------------------------------------------------- + +For statistic image + +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/images/statistic.png + +Copyright (c) 2015 Github, Inc.
+ + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +----------------------------------------------------------------------- + +For PMML sample files + +examples/storm-pmml-examples/src/main/resources/KNIME_PMML_4.1_Examples_single_audit_logreg.xml +examples/storm-pmml-examples/src/main/resources/Audit.50.csv + +This product bundles PMML Sample Files, which are available under a +"3-clause BSD" license. For details, see http://dmg.org/documents/dmg-pmml-license-2016.pdf + +----------------------------------------------------------------------- + +For vis.js 4.16.1 2016-04-18 +(storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/vis.min.js +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/css/vis.min.css) + +vis.js +https://github.com/almende/vis +A dynamic, browser-based visualization library. + +The MIT License (MIT) + +Copyright (C) 2011-2016 Almende B.V, http://almende.com + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/LICENSE-binary b/LICENSE-binary new file mode 100644 index 00000000000..af28cbc0a0c --- /dev/null +++ b/LICENSE-binary @@ -0,0 +1,998 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +----------------------------------------------------------------------- + +For jQuery 3.5.1 (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/jquery-3.5.1.min.js) + +MIT license selected: + +Copyright OpenJS Foundation and other contributors, https://openjsf.org/ + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +----------------------------------------------------------------------- + +For jQuery Cookies 2.2.0 (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/jquery.cookies.2.2.0.min.js) + +Copyright (c) 2005 - 2010, James Auldridge + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +----------------------------------------------------------------------- + +For typeahead jquery 0.10.5 (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/typeahead.jquery.min.js) + +Copyright (c) 2013-2014 Twitter, Inc + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +----------------------------------------------------------------------- + +For js-yaml.min.js (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/js-yaml.min.js) + +(The MIT License) + +Copyright (C) 2011-2015 by Vitaly Puzrin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +----------------------------------------------------------------------- + +For dagre.min.js (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/dagre.min.js) + +Copyright (c) 2012-2014 Chris Pettitt + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +----------------------------------------------------------------------- + +For cytoscape.min.js and cytoscape-dagre.js (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/cytoscape.min.js and storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/cytoscape-dagre.js) + +Copyright (c) 2016 The Cytoscape Consortium + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +----------------------------------------------------------------------- + +For esprima.js (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/esprima.min.js) + +Copyright JS Foundation and other contributors, https://js.foundation/ + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +----------------------------------------------------------------------- + +For mustache.js (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/jquery.mustache.js) + +The MIT License + +Copyright (c) 2009 Chris Wanstrath (Ruby) +Copyright (c) 2010-2014 Jan Lehnardt (JavaScript) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +----------------------------------------------------------------------- + +For moment.js (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/moment.min.js) + +Copyright (c) 2011-2014 Tim Wood, Iskren Chernev, Moment.js contributors + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + + +----------------------------------------------------------------------- + +For Jquery url plugin (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/url.min.js) + +Jquery Url (A Jquery plugin for URL parser) v1.8.6 +Source repository: https://github.com/websanova/js-url +Licensed under an MIT-style license. See https://github.com/websanova/js-url#license for details. + +----------------------------------------------------------------------- + + +For jquery.blockUI.min.js (storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/jquery.blockUI.min.js) + +jQuery BlockUI; v20131009 +http://jquery.malsup.com/block/ +Copyright (c) 2013 M. Alsup; Dual licensed: MIT/GPL + +Copyright © 2007-2013 M. Alsup. + +MIT license selected: + +The BlockUI plugin is dual licensed under the MIT and GPL licenses. + +You may use either license.
The MIT license is recommended for most projects +because it is simple and easy to understand and it places almost no +restrictions on what you can do with the plugin. + +If the GPL suits your project better you are also free to use the plugin +under that license. + +You do not have to do anything special to choose one license or the other and +you don't have to notify anyone which license you are using. You are free to +use the BlockUI plugin in commercial projects as long as the copyright header is left intact. + +----------------------------------------------------------------------- + +For jquery dataTables v1.10.4 + +(storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/jquery.dataTables.1.10.4.min.js +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/css/jquery.dataTables.1.10.4.min.css +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/images/{{back, forward}_{disabled, enabled, enabled_hover}}.png +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/images/{sort_{asc, asc_disabled, both, desc, desc_disabled}}.png) + +Copyright (c) 2008-2013 SpryMedia Limited +http://datatables.net + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +----------------------------------------------------------------------- + +For jquery dataTables bootstrap integration + +(storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/jquery.dataTables.1.10.4.min.js +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/dataTables.bootstrap.min.js +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/css/dataTables.bootstrap.css) + +Copyright (c) 2013-2014 SpryMedia Limited +http://datatables.net/license +https://github.com/DataTables/Plugins/tree/master/integration/bootstrap/3 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +----------------------------------------------------------------------- + +For bootstrap v3.3.1 +(storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/bootstrap-3.3.1.min.js +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/css/bootstrap-3.3.1.min.css) + +Bootstrap v3.3.1 (http://getbootstrap.com) +Copyright 2011-2014 Twitter, Inc. +Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + +----------------------------------------------------------------------- + +For jQuery JSONFormatter 1.0.1 2015-02-28 +(storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/jsonFormatter.min.js +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/css/jsonFormatter.min.css) +http://www.jqueryscript.net/other/jQuery-Plugin-For-Pretty-JSON-Formatting-jsonFormatter.html + +The MIT License (MIT) + +Copyright (c) 2015 Matthew Heironimus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +----------------------------------------------------------------------- + +For statistic image + +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/images/statistic.png + +Copyright (c) 2015 Github, Inc. + + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE.
+ +----------------------------------------------------------------------- + +For PMML sample files + +examples/storm-pmml-examples/src/main/resources/KNIME_PMML_4.1_Examples_single_audit_logreg.xml +examples/storm-pmml-examples/src/main/resources/Audit.50.csv + +This product bundles PMML Sample Files, which are available under a +"3-clause BSD" license. For details, see http://dmg.org/documents/dmg-pmml-license-2016.pdf + +----------------------------------------------------------------------- + +For vis.js 4.16.1 2016-04-18 +(storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/js/vis.min.js +storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/css/vis.min.css) + +vis.js +https://github.com/almende/vis +A dynamic, browser-based visualization library. + +The MIT License (MIT) + +Copyright (C) 2011-2016 Almende B.V, http://almende.com + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +----------------------------END OF SOURCE NOTICES ------------------------------------------- + + +The following dependencies are included in the binary Storm distributions, in addition to the source dependencies listed above. +The license texts of these dependencies can be found in the licenses directory. 
+ + 3-Clause BSD License + + * Kryo (com.esotericsoftware:kryo:5.6.2 - https://github.com/EsotericSoftware/kryo/kryo) + * MinLog (com.esotericsoftware:minlog:1.3.1 - https://github.com/EsotericSoftware/minlog) + * ReflectASM (com.esotericsoftware:reflectasm:1.11.9 - https://github.com/EsotericSoftware/reflectasm) + + AL 2.0, GPL v2, MPL 2.0 + + * RabbitMQ Java Client (com.rabbitmq:amqp-client:5.26.0 - https://www.rabbitmq.com) + + Apache License + + * carbonite (org.clojars.bipinprasad:carbonite:1.6.0 - https://github.com/bipinprasad/carbonite) + + Apache License, Version 2.0 + + * Annotations for Metrics (io.dropwizard.metrics:metrics-annotation:4.2.37 - https://metrics.dropwizard.io/metrics-annotation) + * Apache Avro (org.apache.avro:avro:1.12.1 - https://avro.apache.org) + * Apache Commons CLI (commons-cli:commons-cli:1.11.0 - https://commons.apache.org/proper/commons-cli/) + * Apache Commons Codec (commons-codec:commons-codec:1.20.0 - https://commons.apache.org/proper/commons-codec/) + * Apache Commons Collections (org.apache.commons:commons-collections4:4.5.0 - https://commons.apache.org/proper/commons-collections/) + * Apache Commons Compress (org.apache.commons:commons-compress:1.28.0 - https://commons.apache.org/proper/commons-compress/) + * Apache Commons Configuration (org.apache.commons:commons-configuration2:2.13.0 - https://commons.apache.org/proper/commons-configuration/) + * Apache Commons Crypto (org.apache.commons:commons-crypto:1.1.0 - https://commons.apache.org/proper/commons-crypto/) + * Apache Commons Exec (org.apache.commons:commons-exec:1.5.0 - https://commons.apache.org/proper/commons-exec/) + * Apache Commons FileUpload (commons-fileupload:commons-fileupload:1.6.0 - https://commons.apache.org/proper/commons-fileupload/) + * Apache Commons IO (commons-io:commons-io:2.21.0 - https://commons.apache.org/proper/commons-io/) + * Apache Commons Lang (org.apache.commons:commons-lang3:3.20.0 - https://commons.apache.org/proper/commons-lang/) + * Apache Commons Logging (commons-logging:commons-logging:1.3.5 - https://commons.apache.org/proper/commons-logging/) + * Apache Commons Math (org.apache.commons:commons-math3:3.6.1 - http://commons.apache.org/proper/commons-math/) + * Apache Commons Net (commons-net:commons-net:3.9.0 - https://commons.apache.org/proper/commons-net/) + * Apache Commons Text (org.apache.commons:commons-text:1.14.0 - https://commons.apache.org/proper/commons-text) + * Apache Hadoop Annotations (org.apache.hadoop:hadoop-annotations:3.4.2 - no url defined) + * Apache Hadoop Auth (org.apache.hadoop:hadoop-auth:3.4.2 - no url defined) + * Apache Hadoop Common (org.apache.hadoop:hadoop-common:3.4.2 - no url defined) + * Apache Hadoop shaded Guava (org.apache.hadoop.thirdparty:hadoop-shaded-guava:1.4.0 - https://www.apache.org/hadoop-thirdparty/hadoop-shaded-guava/) + * Apache Hadoop shaded Protobuf (org.apache.hadoop.thirdparty:hadoop-shaded-protobuf_3_25:1.4.0 - https://www.apache.org/hadoop-thirdparty/hadoop-shaded-protobuf_3_25/) + * Apache HBase - Client (org.apache.hbase:hbase-client:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-client) + * Apache HBase - Common (org.apache.hbase:hbase-common:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-common) + * Apache HBase - Hadoop Compatibility (org.apache.hbase:hbase-hadoop-compat:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-hadoop-compat) + * Apache HBase - Hadoop Two Compatibility 
(org.apache.hbase:hbase-hadoop2-compat:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-hadoop2-compat) + * Apache HBase - Logging (org.apache.hbase:hbase-logging:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-logging) + * Apache HBase - Metrics API (org.apache.hbase:hbase-metrics-api:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-metrics-api) + * Apache HBase - Metrics Implementation (org.apache.hbase:hbase-metrics:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-metrics) + * Apache HBase Patched and Relocated (Shaded) Protobuf (org.apache.hbase.thirdparty:hbase-shaded-protobuf:4.1.12 - https://hbase.apache.org/hbase-shaded-protobuf) + * Apache HBase - Protocol (org.apache.hbase:hbase-protocol:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-protocol) + * Apache HBase Relocated (Shaded) GSON Libs (org.apache.hbase.thirdparty:hbase-shaded-gson:4.1.12 - https://hbase.apache.org/hbase-shaded-gson) + * Apache HBase Relocated (Shaded) Netty Libs (org.apache.hbase.thirdparty:hbase-shaded-netty:4.1.12 - https://hbase.apache.org/hbase-shaded-netty) + * Apache HBase Relocated (Shaded) Third-party Miscellaneous Libs (org.apache.hbase.thirdparty:hbase-shaded-miscellaneous:4.1.12 - https://hbase.apache.org/hbase-shaded-miscellaneous) + * Apache HBase - Shaded Protocol (org.apache.hbase:hbase-protocol-shaded:2.6.4-hadoop3 - https://hbase.apache.org/hbase-build-configuration/hbase-protocol-shaded) + * Apache HBase Unsafe Wrapper (org.apache.hbase.thirdparty:hbase-unsafe:4.1.12 - https://hbase.apache.org/hbase-unsafe) + * Apache HttpClient (org.apache.httpcomponents:httpclient:4.5.14 - http://hc.apache.org/httpcomponents-client-ga) + * Apache HttpCore (org.apache.httpcomponents:httpcore:4.4.16 - http://hc.apache.org/httpcomponents-core-ga) + * Apache Kafka (org.apache.kafka:kafka-clients:4.1.1 - https://kafka.apache.org) + * Apache Log4j API (org.apache.logging.log4j:log4j-api:2.25.2 - https://logging.apache.org/log4j/2.x/) + * Apache Log4j Core (org.apache.logging.log4j:log4j-core:2.25.2 - https://logging.apache.org/log4j/2.x/) + * Apache Thrift (org.apache.thrift:libthrift:0.22.0 - https://thrift.apache.org/) + * Apache Yetus - Audience Annotations (org.apache.yetus:audience-annotations:0.12.0 - https://yetus.apache.org/audience-annotations) + * Apache Yetus - Audience Annotations (org.apache.yetus:audience-annotations:0.13.0 - https://yetus.apache.org/audience-annotations) + * Apache ZooKeeper - Jute (org.apache.zookeeper:zookeeper-jute:3.9.4 - http://zookeeper.apache.org/zookeeper-jute) + * Apache ZooKeeper - Server (org.apache.zookeeper:zookeeper:3.9.4 - http://zookeeper.apache.org/zookeeper) + * ASM based accessors helper used by json-smart (net.minidev:accessors-smart:2.6.0 - https://urielch.github.io/) + * Caffeine cache (com.github.ben-manes.caffeine:caffeine:3.2.3 - https://github.com/ben-manes/caffeine) + * chill-java (com.twitter:chill-java:0.9.5 - https://github.com/twitter/chill) + * ClassMate (com.fasterxml:classmate:1.7.0 - https://github.com/FasterXML/java-classmate) + * com.helger:profiler (com.helger:profiler:1.1.1 - https://github.com/phax/profiler) + * Commons Lang (commons-lang:commons-lang:2.6 - http://commons.apache.org/lang/) + * Curator Client (org.apache.curator:curator-client:5.9.0 - https://curator.apache.org/curator-client) + * Curator Framework (org.apache.curator:curator-framework:5.9.0 - https://curator.apache.org/curator-framework) + * Curator 
Recipes (org.apache.curator:curator-recipes:5.9.0 - https://curator.apache.org/curator-recipes) + * Dropwizard (io.dropwizard:dropwizard-core:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-core) + * Dropwizard Asset Bundle (io.dropwizard:dropwizard-assets:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-assets) + * Dropwizard Configuration Support (io.dropwizard:dropwizard-configuration:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-configuration) + * Dropwizard Health Checking Support (io.dropwizard:dropwizard-health:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-health) + * Dropwizard Jackson Support (io.dropwizard:dropwizard-jackson:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-jackson) + * Dropwizard Jersey Support (io.dropwizard:dropwizard-jersey:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-jersey) + * Dropwizard Jetty Support (io.dropwizard:dropwizard-jetty:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-jetty) + * Dropwizard Lifecycle Support (io.dropwizard:dropwizard-lifecycle:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-lifecycle) + * Dropwizard Logging Support (io.dropwizard:dropwizard-logging:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-logging) + * Dropwizard Metrics Support (io.dropwizard:dropwizard-metrics:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-metrics) + * Dropwizard Request Logging Support (io.dropwizard:dropwizard-request-logging:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-request-logging) + * Dropwizard Servlet Support (io.dropwizard:dropwizard-servlets:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-servlets) + * Dropwizard Utility Classes (io.dropwizard:dropwizard-util:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-util) + * Dropwizard Validation Support (io.dropwizard:dropwizard-validation:5.0.0 - http://www.dropwizard.io/5.0.0/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-validation) + * error-prone annotations (com.google.errorprone:error_prone_annotations:2.45.0 - https://errorprone.info/error_prone_annotations) + * FindBugs-jsr305 (com.google.code.findbugs:jsr305:3.0.2 - http://findbugs.sourceforge.net/) + * Graphite Integration for Metrics (io.dropwizard.metrics:metrics-graphite:4.2.37 - https://metrics.dropwizard.io/metrics-graphite) + * Gson (com.google.code.gson:gson:2.13.2 - https://github.com/google/gson) + * Guava: Google Core Libraries for Java (com.google.guava:guava:33.5.0-jre - https://github.com/google/guava) + * Guava InternalFutureFailureAccess and InternalFutures (com.google.guava:failureaccess:1.0.3 - https://github.com/google/guava/failureaccess) + * Guava ListenableFuture only (com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava - https://github.com/google/guava/listenablefuture) + * Hibernate Validator Engine 
(org.hibernate.validator:hibernate-validator:8.0.3.Final - https://hibernate.org/validator) + * j2html (com.j2html:j2html:1.6.0 - http://j2html.com) + * J2ObjC Annotations (com.google.j2objc:j2objc-annotations:3.1 - https://github.com/google/j2objc/) + * Jackson-annotations (com.fasterxml.jackson.core:jackson-annotations:2.20 - https://github.com/FasterXML/jackson) + * Jackson-core (com.fasterxml.jackson.core:jackson-core:2.20.1 - https://github.com/FasterXML/jackson-core) + * jackson-databind (com.fasterxml.jackson.core:jackson-databind:2.20.1 - https://github.com/FasterXML/jackson) + * Jackson dataformat: Smile (com.fasterxml.jackson.dataformat:jackson-dataformat-smile:2.20.1 - https://github.com/FasterXML/jackson-dataformats-binary) + * Jackson-dataformat-YAML (com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:2.20.1 - https://github.com/FasterXML/jackson-dataformats-text) + * Jackson datatype: Guava (com.fasterxml.jackson.datatype:jackson-datatype-guava:2.20.1 - https://github.com/FasterXML/jackson-datatypes-collections) + * Jackson datatype: jdk8 (com.fasterxml.jackson.datatype:jackson-datatype-jdk8:2.20.1 - https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jdk8) + * Jackson datatype: JSR310 (com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.20.1 - https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jsr310) + * Jackson Integration for Metrics (io.dropwizard.metrics:metrics-json:4.2.37 - https://metrics.dropwizard.io/metrics-json) + * Jackson Jakarta-RS: base (com.fasterxml.jackson.jakarta.rs:jackson-jakarta-rs-base:2.20.1 - https://github.com/FasterXML/jackson-jakarta-rs-providers/jackson-jakarta-rs-base) + * Jackson Jakarta-RS: JSON (com.fasterxml.jackson.jakarta.rs:jackson-jakarta-rs-json-provider:2.20.1 - https://github.com/FasterXML/jackson-jakarta-rs-providers/jackson-jakarta-rs-json-provider) + * Jackson module: Blackbird (com.fasterxml.jackson.module:jackson-module-blackbird:2.20.1 - https://github.com/FasterXML/jackson-modules-base) + * Jackson module: Jakarta XML Bind Annotations (jakarta.xml.bind) (com.fasterxml.jackson.module:jackson-module-jakarta-xmlbind-annotations:2.20.1 - https://github.com/FasterXML/jackson-modules-base) + * Jackson-module-parameter-names (com.fasterxml.jackson.module:jackson-module-parameter-names:2.20.1 - https://github.com/FasterXML/jackson-modules-java8/jackson-module-parameter-names) + * Jakarta Bean Validation API (jakarta.validation:jakarta.validation-api:3.0.2 - https://beanvalidation.org) + * Jakarta Dependency Injection (jakarta.inject:jakarta.inject-api:2.0.1 - https://github.com/eclipse-ee4j/injection-api) + * Java Concurrency Tools Core Library (org.jctools:jctools-core:4.0.5 - https://github.com/JCTools) + * javax.inject (javax.inject:javax.inject:1 - http://code.google.com/p/atinject/) + * JBoss Logging 3 (org.jboss.logging:jboss-logging:3.6.1.Final - http://www.jboss.org) + * JCIP Annotations under Apache License (com.github.stephenc.jcip:jcip-annotations:1.0-1 - http://stephenc.github.com/jcip-annotations) + * JCL 1.2 implemented over SLF4J (org.slf4j:jcl-over-slf4j:2.0.17 - http://www.slf4j.org) + * Jettison (org.codehaus.jettison:jettison:1.5.4 - https://github.com/jettison-json/jettison) + * JSON Small and Fast Parser (net.minidev:json-smart:2.6.0 - https://urielch.github.io/) + * JSpecify annotations (org.jspecify:jspecify:1.0.0 - http://jspecify.org/) + * JVM Integration for Metrics (io.dropwizard.metrics:metrics-jvm:4.2.37 - https://metrics.dropwizard.io/metrics-jvm) + * Kerby ASN1 
Project (org.apache.kerby:kerby-asn1:2.0.3 - https://directory.apache.org/kerby/kerby-common/kerby-asn1) + * Kerby Config (org.apache.kerby:kerby-config:2.0.3 - https://directory.apache.org/kerby/kerby-common/kerby-config) + * Kerby-kerb core (org.apache.kerby:kerb-core:2.0.3 - https://directory.apache.org/kerby/kerby-kerb/kerb-core) + * Kerby-kerb Crypto (org.apache.kerby:kerb-crypto:2.0.3 - https://directory.apache.org/kerby/kerby-kerb/kerb-crypto) + * Kerby-kerb Util (org.apache.kerby:kerb-util:2.0.3 - https://directory.apache.org/kerby/kerby-kerb/kerb-util) + * Kerby PKIX Project (org.apache.kerby:kerby-pkix:2.0.3 - https://directory.apache.org/kerby/kerby-pkix) + * Kerby Util (org.apache.kerby:kerby-util:2.0.3 - https://directory.apache.org/kerby/kerby-common/kerby-util) + * Log4j Implemented Over SLF4J (org.slf4j:log4j-over-slf4j:2.0.17 - http://www.slf4j.org) + * LZ4 and xxHash (org.lz4:lz4-java:1.8.0 - https://github.com/lz4/lz4-java) + * Maven Artifact (org.apache.maven:maven-artifact:3.9.11 - https://maven.apache.org/ref/3.9.11/maven-artifact/) + * Maven Artifact Resolver API (org.apache.maven.resolver:maven-resolver-api:1.9.24 - https://maven.apache.org/resolver/maven-resolver-api/) + * Maven Artifact Resolver Connector Basic (org.apache.maven.resolver:maven-resolver-connector-basic:1.9.24 - https://maven.apache.org/resolver/maven-resolver-connector-basic/) + * Maven Artifact Resolver Implementation (org.apache.maven.resolver:maven-resolver-impl:1.9.24 - https://maven.apache.org/resolver/maven-resolver-impl/) + * Maven Artifact Resolver Named Locks (org.apache.maven.resolver:maven-resolver-named-locks:1.9.24 - https://maven.apache.org/resolver/maven-resolver-named-locks/) + * Maven Artifact Resolver Provider (org.apache.maven:maven-resolver-provider:3.9.11 - https://maven.apache.org/ref/3.9.11/maven-resolver-provider/) + * Maven Artifact Resolver SPI (org.apache.maven.resolver:maven-resolver-spi:1.9.24 - https://maven.apache.org/resolver/maven-resolver-spi/) + * Maven Artifact Resolver Transport File (org.apache.maven.resolver:maven-resolver-transport-file:1.9.24 - https://maven.apache.org/resolver/maven-resolver-transport-file/) + * Maven Artifact Resolver Transport HTTP (org.apache.maven.resolver:maven-resolver-transport-http:1.9.24 - https://maven.apache.org/resolver/maven-resolver-transport-http/) + * Maven Artifact Resolver Utilities (org.apache.maven.resolver:maven-resolver-util:1.9.24 - https://maven.apache.org/resolver/maven-resolver-util/) + * Maven Builder Support (org.apache.maven:maven-builder-support:3.9.11 - https://maven.apache.org/ref/3.9.11/maven-builder-support/) + * Maven Model (org.apache.maven:maven-model:3.9.11 - https://maven.apache.org/ref/3.9.11/maven-model/) + * Maven Model Builder (org.apache.maven:maven-model-builder:3.9.11 - https://maven.apache.org/ref/3.9.11/maven-model-builder/) + * Maven Repository Metadata Model (org.apache.maven:maven-repository-metadata:3.9.11 - https://maven.apache.org/ref/3.9.11/maven-repository-metadata/) + * Metrics Core (io.dropwizard.metrics:metrics-core:4.2.37 - https://metrics.dropwizard.io/metrics-core) + * Metrics Health Checks (io.dropwizard.metrics:metrics-healthchecks:4.2.37 - https://metrics.dropwizard.io/metrics-healthchecks) + * Metrics Integration for Jersey 3.x (io.dropwizard.metrics:metrics-jersey3:4.2.37 - https://metrics.dropwizard.io/metrics-jersey3) + * Metrics Integration for Jetty 12.x and higher (io.dropwizard.metrics:metrics-jetty12:4.2.37 - https://metrics.dropwizard.io/metrics-jetty12) + * Metrics 
Integration for Jetty 12.x and higher with Jakarta EE 10 (io.dropwizard.metrics:metrics-jetty12-ee10:4.2.37 - https://metrics.dropwizard.io/metrics-jetty12-ee10) + * Metrics Integration for Logback (io.dropwizard.metrics:metrics-logback:4.2.37 - https://metrics.dropwizard.io/metrics-logback) + * Metrics Integration with JMX (io.dropwizard.metrics:metrics-jmx:4.2.37 - https://metrics.dropwizard.io/metrics-jmx) + * Metrics Utility Jakarta Servlets (io.dropwizard.metrics:metrics-jakarta-servlets:4.2.37 - https://metrics.dropwizard.io/metrics-jakarta-servlets) + * Netty/All-in-One (io.netty:netty-all:4.2.7.Final - https://netty.io/netty-all/) + * Netty/Buffer (io.netty:netty-buffer:4.2.7.Final - https://netty.io/netty-buffer/) + * Netty/Codec/Base (io.netty:netty-codec-base:4.2.7.Final - https://netty.io/netty-codec-base/) + * Netty/Codec/Classes/Quic (io.netty:netty-codec-classes-quic:4.2.7.Final - https://netty.io/netty-codec-classes-quic/) + * Netty/Codec/Compression (io.netty:netty-codec-compression:4.2.7.Final - https://netty.io/netty-codec-compression/) + * Netty/Codec/DNS (io.netty:netty-codec-dns:4.2.7.Final - https://netty.io/netty-codec-dns/) + * Netty/Codec/HAProxy (io.netty:netty-codec-haproxy:4.2.7.Final - https://netty.io/netty-codec-haproxy/) + * Netty/Codec/HTTP (io.netty:netty-codec-http:4.2.7.Final - https://netty.io/netty-codec-http/) + * Netty/Codec/HTTP2 (io.netty:netty-codec-http2:4.2.7.Final - https://netty.io/netty-codec-http2/) + * Netty/Codec/Http3 (io.netty:netty-codec-http3:4.2.7.Final - https://netty.io/netty-codec-http3/) + * Netty/Codec/Marshalling (io.netty:netty-codec-marshalling:4.2.7.Final - https://netty.io/netty-codec-marshalling/) + * Netty/Codec/Memcache (io.netty:netty-codec-memcache:4.2.7.Final - https://netty.io/netty-codec-memcache/) + * Netty/Codec/MQTT (io.netty:netty-codec-mqtt:4.2.7.Final - https://netty.io/netty-codec-mqtt/) + * Netty/Codec/Protobuf (io.netty:netty-codec-protobuf:4.2.7.Final - https://netty.io/netty-codec-protobuf/) + * Netty/Codec/Redis (io.netty:netty-codec-redis:4.2.7.Final - https://netty.io/netty-codec-redis/) + * Netty/Codec/SMTP (io.netty:netty-codec-smtp:4.2.7.Final - https://netty.io/netty-codec-smtp/) + * Netty/Codec/Socks (io.netty:netty-codec-socks:4.2.7.Final - https://netty.io/netty-codec-socks/) + * Netty/Codec/Stomp (io.netty:netty-codec-stomp:4.2.7.Final - https://netty.io/netty-codec-stomp/) + * Netty/Codec/XML (io.netty:netty-codec-xml:4.2.7.Final - https://netty.io/netty-codec-xml/) + * Netty/Codec (io.netty:netty-codec:4.2.7.Final - https://netty.io/netty-codec/) + * Netty/Common (io.netty:netty-common:4.2.7.Final - https://netty.io/netty-common/) + * Netty/Handler/Proxy (io.netty:netty-handler-proxy:4.2.7.Final - https://netty.io/netty-handler-proxy/) + * Netty/Handler/Ssl/Ocsp (io.netty:netty-handler-ssl-ocsp:4.2.7.Final - https://netty.io/netty-handler-ssl-ocsp/) + * Netty/Handler (io.netty:netty-handler:4.2.7.Final - https://netty.io/netty-handler/) + * Netty/Resolver/DNS/Classes/MacOS (io.netty:netty-resolver-dns-classes-macos:4.2.7.Final - https://netty.io/netty-resolver-dns-classes-macos/) + * Netty/Resolver/DNS (io.netty:netty-resolver-dns:4.2.7.Final - https://netty.io/netty-resolver-dns/) + * Netty/Resolver (io.netty:netty-resolver:4.2.7.Final - https://netty.io/netty-resolver/) + * Netty/TomcatNative [BoringSSL - Static] (io.netty:netty-tcnative-boringssl-static:2.0.74.Final - https://github.com/netty/netty-tcnative/netty-tcnative-boringssl-static/) + * Netty/TomcatNative [OpenSSL - Classes] 
(io.netty:netty-tcnative-classes:2.0.74.Final - https://github.com/netty/netty-tcnative/netty-tcnative-classes/) + * Netty/TomcatNative [OpenSSL - Dynamic] (io.netty:netty-tcnative:2.0.74.Final - https://github.com/netty/netty-tcnative/netty-tcnative/) + * Netty/Transport/Classes/Epoll (io.netty:netty-transport-classes-epoll:4.2.7.Final - https://netty.io/netty-transport-classes-epoll/) + * Netty/Transport/Classes/io_uring (io.netty:netty-transport-classes-io_uring:4.2.7.Final - https://netty.io/netty-transport-classes-io_uring/) + * Netty/Transport/Classes/KQueue (io.netty:netty-transport-classes-kqueue:4.2.7.Final - https://netty.io/netty-transport-classes-kqueue/) + * Netty/Transport/Native/Epoll (io.netty:netty-transport-native-epoll:4.2.7.Final - https://netty.io/netty-transport-native-epoll/) + * Netty/Transport/Native/Unix/Common (io.netty:netty-transport-native-unix-common:4.2.7.Final - https://netty.io/netty-transport-native-unix-common/) + * Netty/Transport/RXTX (io.netty:netty-transport-rxtx:4.2.7.Final - https://netty.io/netty-transport-rxtx/) + * Netty/Transport/SCTP (io.netty:netty-transport-sctp:4.2.7.Final - https://netty.io/netty-transport-sctp/) + * Netty/Transport/UDT (io.netty:netty-transport-udt:4.2.7.Final - https://netty.io/netty-transport-udt/) + * Netty/Transport (io.netty:netty-transport:4.2.7.Final - https://netty.io/netty-transport/) + * Nimbus JOSE+JWT (com.nimbusds:nimbus-jose-jwt:9.37.2 - https://bitbucket.org/connect2id/nimbus-jose-jwt) + * Objenesis (org.objenesis:objenesis:3.4 - https://objenesis.org/objenesis) + * OpenTelemetry Java (io.opentelemetry:opentelemetry-api:1.49.0 - https://github.com/open-telemetry/opentelemetry-java) + * OpenTelemetry Java (io.opentelemetry:opentelemetry-context:1.49.0 - https://github.com/open-telemetry/opentelemetry-java) + * OpenTelemetry Semantic Conventions Java (io.opentelemetry.semconv:opentelemetry-semconv:1.29.0-alpha - https://github.com/open-telemetry/semantic-conventions-java) + * Plexus Common Utilities (org.codehaus.plexus:plexus-utils:3.6.0 - https://codehaus-plexus.github.io/plexus-utils/) + * Plexus Interpolation API (org.codehaus.plexus:plexus-interpolation:1.28 - https://codehaus-plexus.github.io/plexus-pom/plexus-interpolation/) + * SLF4J 2 Provider for Log4j API (org.apache.logging.log4j:log4j-slf4j2-impl:2.25.2 - https://logging.apache.org/log4j/2.x/) + * SnakeYAML (org.yaml:snakeyaml:2.2 - https://bitbucket.org/snakeyaml/snakeyaml) + * snappy-java (org.xerial.snappy:snappy-java:1.1.10.8 - https://github.com/xerial/snappy-java) + * Throttling Appender (io.dropwizard.logback:logback-throttling-appender:1.5.3 - https://github.com/dropwizard/logback-throttling-appender/) + * Woodstox (com.fasterxml.woodstox:woodstox-core:7.1.1 - https://github.com/FasterXML/woodstox) + + Apache License, Version 2.0, BSD 2-Clause, Eclipse Distribution License, Version 1.0, Eclipse Public License, Version 2.0, jQuery license, MIT License, Modified BSD, Public Domain, The GNU General Public License (GPL), Version 2, With Classpath Exception, W3C license + + * jersey-container-grizzly2-http (org.glassfish.jersey.containers:jersey-container-grizzly2-http:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-container-grizzly2-http) + * jersey-container-grizzly2-servlet (org.glassfish.jersey.containers:jersey-container-grizzly2-servlet:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-container-grizzly2-servlet) + * jersey-container-servlet 
(org.glassfish.jersey.containers:jersey-container-servlet:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-container-servlet) + * jersey-container-servlet-core (org.glassfish.jersey.containers:jersey-container-servlet-core:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-container-servlet-core) + * jersey-core-client (org.glassfish.jersey.core:jersey-client:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/jersey-client) + * jersey-ext-bean-validation (org.glassfish.jersey.ext:jersey-bean-validation:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-bean-validation) + * jersey-ext-metainf-services (org.glassfish.jersey.ext:jersey-metainf-services:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-metainf-services) + * jersey-inject-hk2 (org.glassfish.jersey.inject:jersey-hk2:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-hk2) + + Apache License, Version 2.0, Eclipse Public License, Version 2.0 + + * Core :: HTTP (org.eclipse.jetty:jetty-http:12.1.4 - https://jetty.org/jetty-core/jetty-http) + * Core :: IO (org.eclipse.jetty:jetty-io:12.1.4 - https://jetty.org/jetty-core/jetty-io) + * Core :: Security (org.eclipse.jetty:jetty-security:12.1.4 - https://jetty.org/jetty-core/jetty-security) + * Core :: Server (org.eclipse.jetty:jetty-server:12.1.4 - https://jetty.org/jetty-core/jetty-server) + * Core :: Sessions (org.eclipse.jetty:jetty-session:12.1.4 - https://jetty.org/jetty-core/jetty-session) + * Core :: Utilities (org.eclipse.jetty:jetty-util:12.1.4 - https://jetty.org/jetty-core/jetty-util) + * EE10 :: Servlet (org.eclipse.jetty.ee10:jetty-ee10-servlet:12.1.4 - https://jetty.org/jetty-ee10/jetty-ee10-servlet) + * EE10 :: Utility Servlets and Filters (org.eclipse.jetty.ee10:jetty-ee10-servlets:12.1.4 - https://jetty.org/jetty-ee10/jetty-ee10-servlets) + + Apache License, Version 2.0, Eclipse Public License, Version 2.0, Modified BSD, The GNU General Public License (GPL), Version 2, With Classpath Exception + + * jersey-core-server (org.glassfish.jersey.core:jersey-server:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/jersey-server) + + Apache License, Version 2.0, Eclipse Public License, Version 2.0, Public Domain, The GNU General Public License (GPL), Version 2, With Classpath Exception + + * jersey-core-common (org.glassfish.jersey.core:jersey-common:3.1.11 - https://projects.eclipse.org/projects/ee4j.jersey/jersey-common) + + Apache License, Version 2.0, Eclipse Public License - Version 2.0 + + * Core :: Security (org.eclipse.jetty:jetty-security:12.1.1 - https://jetty.org/jetty-core/jetty-security) + * Core :: Server (org.eclipse.jetty:jetty-server:12.1.1 - https://jetty.org/jetty-core/jetty-server) + * Core :: Utilities (org.eclipse.jetty:jetty-util:12.1.1 - https://jetty.org/jetty-core/jetty-util) + * Jetty :: SetUID JNA (org.eclipse.jetty.toolchain.setuid:jetty-setuid-jna:2.0.3 - https://eclipse.org/jetty/jetty-setuid-jna) + + Apache License, Version 2.0, GNU General Public License, version 2 + + * RocksDB JNI (org.rocksdb:rocksdbjni:10.2.1 - https://rocksdb.org) + + Apache License, Version 2.0, LGPL 2.1, MPL 1.1 + + * Javassist (org.javassist:javassist:3.30.2-GA - https://www.javassist.org/) + + Apache License, Version 2.0, LGPL-2.1-or-later + + * Java Native Access (net.java.dev.jna:jna-jpms:5.14.0 - https://github.com/java-native-access/jna) + + Bouncy Castle Licence + + * Bouncy Castle ASN.1 Extension and Utility APIs 
(org.bouncycastle:bcutil-jdk18on:1.83 - https://www.bouncycastle.org/download/bouncy-castle-java/) + * Bouncy Castle PKIX, CMS, EAC, TSP, PKCS, OCSP, CMP, and CRMF APIs (org.bouncycastle:bcpkix-jdk18on:1.83 - https://www.bouncycastle.org/download/bouncy-castle-java/) + * Bouncy Castle Provider (org.bouncycastle:bcprov-jdk18on:1.83 - https://www.bouncycastle.org/download/bouncy-castle-java/) + + BSD 2-Clause License + + * zstd-jni (com.github.luben:zstd-jni:1.5.6-10 - https://github.com/luben/zstd-jni) + + BSD 3-Clause License + + * asm (org.ow2.asm:asm:9.9 - http://asm.ow2.io/) + * dnsjava (dnsjava:dnsjava:3.6.1 - https://github.com/dnsjava/dnsjava) + * Protocol Buffer Java API (com.google.protobuf:protobuf-java:2.5.0 - http://code.google.com/p/protobuf) + + BSD License + + * JLine (jline:jline:2.14.6 - http://nexus.sonatype.org/oss-repository-hosting.html/jline) + * Stax2 API (org.codehaus.woodstox:stax2-api:4.2.1 - http://github.com/FasterXML/stax2-api) + + CDDL/GPLv2+CE + + * JavaBeans Activation Framework API jar (javax.activation:javax.activation-api:1.2.0 - http://java.net/all/javax.activation-api/) + + Common Development and Distribution License + + * Java Servlet API (javax.servlet:javax.servlet-api:3.1.0 - http://servlet-spec.java.net) + * javax.annotation API (javax.annotation:javax.annotation-api:1.3.2 - http://jcp.org/en/jsr/detail?id=250) + + Common Development and Distribution License (CDDL) v1.1, The GNU General Public License (GPL), Version 2, With Classpath Exception + + * jaxb-api (javax.xml.bind:jaxb-api:2.3.1 - https://github.com/javaee/jaxb-spec/jaxb-api) + * JAXB RI (com.sun.xml.bind:jaxb-impl:2.2.3-1 - http://jaxb.java.net/) + * jersey-json (com.github.pjfanning:jersey-json:1.22.0 - https://github.com/pjfanning/jersey-1.x) + * jersey-servlet (com.sun.jersey:jersey-servlet:1.19.4 - https://jersey.java.net/jersey-servlet/) + + Eclipse Distribution License, Version 1.0 + + * Jakarta XML Binding API (jakarta.xml.bind:jakarta.xml.bind-api:4.0.4 - https://github.com/jakartaee/jaxb-api/jakarta.xml.bind-api) + * JavaBeans Activation Framework (com.sun.activation:jakarta.activation:1.2.1 - https://github.com/eclipse-ee4j/jaf/jakarta.activation) + * JavaBeans Activation Framework API jar (jakarta.activation:jakarta.activation-api:1.2.1 - https://github.com/eclipse-ee4j/jaf/jakarta.activation-api) + + Eclipse Public License, Version 1.0 + + * clojure (org.clojure:clojure:1.12.3 - http://clojure.org/) + * core.specs.alpha (org.clojure:core.specs.alpha:0.4.74 - https://github.com/clojure/build.poms/core.specs.alpha) + * spec.alpha (org.clojure:spec.alpha:0.5.238 - https://github.com/clojure/build.poms/spec.alpha) + * tools.logging (org.clojure:tools.logging:1.3.0 - https://github.com/clojure/build.poms/tools.logging) + + Eclipse Public License, Version 1.0, GNU Lesser General Public License + + * Logback Access Common Module (ch.qos.logback.access:logback-access-common:2.0.6 - http://logback.qos.ch/logback-access-common) + * Logback Access Jetty 12 Module (ch.qos.logback.access:logback-access-jetty12:2.0.6 - http://logback.qos.ch/logback-access-jetty12) + + Eclipse Public License, Version 2.0 + + * grizzly-framework (org.glassfish.grizzly:grizzly-framework:4.0.2 - https://projects.eclipse.org/projects/ee4j.grizzly/grizzly-framework) + * grizzly-http (org.glassfish.grizzly:grizzly-http:4.0.2 - https://projects.eclipse.org/projects/ee4j.grizzly/grizzly-http) + * grizzly-http-server (org.glassfish.grizzly:grizzly-http-server:4.0.2 - 
https://projects.eclipse.org/projects/ee4j.grizzly/grizzly-http-server) + * grizzly-http-servlet (org.glassfish.grizzly:grizzly-http-servlet:4.0.2 - https://projects.eclipse.org/projects/ee4j.grizzly/grizzly-http-servlet) + * org.eclipse.sisu:org.eclipse.sisu.inject (org.eclipse.sisu:org.eclipse.sisu.inject:0.9.0.M4 - https://eclipse.dev/sisu/org.eclipse.sisu.inject/) + + Eclipse Public License, Version 2.0, GPL-2.0-with-classpath-exception + + * Jakarta RESTful WS API (jakarta.ws.rs:jakarta.ws.rs-api:3.1.0 - https://github.com/eclipse-ee4j/jaxrs-api) + + Eclipse Public License, Version 2.0, The GNU General Public License (GPL), Version 2, With Classpath Exception + + * aopalliance version 1.0 repackaged as a module (org.glassfish.hk2.external:aopalliance-repackaged:3.0.6 - https://github.com/eclipse-ee4j/glassfish-hk2/external/aopalliance-repackaged) + * HK2 API module (org.glassfish.hk2:hk2-api:3.0.6 - https://github.com/eclipse-ee4j/glassfish-hk2/hk2-api) + * HK2 Implementation Utilities (org.glassfish.hk2:hk2-utils:3.0.6 - https://github.com/eclipse-ee4j/glassfish-hk2/hk2-utils) + * Jakarta Annotations API (jakarta.annotation:jakarta.annotation-api:2.1.1 - https://projects.eclipse.org/projects/ee4j.ca) + * Jakarta Servlet (jakarta.servlet:jakarta.servlet-api:6.1.0 - https://projects.eclipse.org/projects/ee4j.servlet) + * OSGi resource locator (org.glassfish.hk2:osgi-resource-locator:1.0.3 - https://projects.eclipse.org/projects/ee4j/osgi-resource-locator) + * ServiceLocator Default Implementation (org.glassfish.hk2:hk2-locator:3.0.6 - https://github.com/eclipse-ee4j/glassfish-hk2/hk2-locator) + + Eclipse Public License (EPL) 1.0, GNU Lesser General Public License Version 2.1, February 1999 + + * JGraphT - Core (org.jgrapht:jgrapht-core:0.9.0 - http://www.jgrapht.org/jgrapht-core) + + Eclipse Public License v. 2.0, GNU General Public License, version 2 with the GNU Classpath Exception + + * Eclipse Expressly (org.glassfish.expressly:expressly:5.0.0 - https://projects.eclipse.org/projects/ee4j.expressly) + * Jakarta Expression Language API (jakarta.el:jakarta.el-api:5.0.1 - https://projects.eclipse.org/projects/ee4j.el) + + MIT License + + * argparse4j (net.sourceforge.argparse4j:argparse4j:0.9.0 - https://argparse4j.github.io) + * JCodings (org.jruby.jcodings:jcodings:1.0.58 - http://nexus.sonatype.org/oss-repository-hosting.html/jcodings) + * Joni (org.jruby.joni:joni:2.2.1 - http://nexus.sonatype.org/oss-repository-hosting.html/joni) + * JUL to SLF4J bridge (org.slf4j:jul-to-slf4j:2.0.17 - http://www.slf4j.org) + * SLF4J API Module (org.slf4j:slf4j-api:2.0.17 - http://www.slf4j.org) + * System Out and Err redirected to SLF4J (uk.org.lidalia:sysout-over-slf4j:1.0.2 - http://projects.lidalia.org.uk/sysout-over-slf4j/) + + Revised BSD + + * JSch (com.jcraft:jsch:0.1.55 - http://www.jcraft.com/jsch/) + + The Go license + + * re2j (com.google.re2j:re2j:1.1 - http://github.com/google/re2j) \ No newline at end of file diff --git a/NOTICE b/NOTICE index 637e578c5e4..39041e42d6f 100644 --- a/NOTICE +++ b/NOTICE @@ -1,8 +1,8 @@ Apache Storm -Copyright 2014 The Apache Software Foundation +Copyright 2015 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). This product includes software developed by Yahoo! Inc. (www.yahoo.com) -Copyright (c) 2012-2014 Yahoo! Inc. \ No newline at end of file +Copyright (c) 2012-2015 Yahoo! Inc. 
\ No newline at end of file diff --git a/NOTICE-binary b/NOTICE-binary new file mode 100644 index 00000000000..856c1590d9b --- /dev/null +++ b/NOTICE-binary @@ -0,0 +1,1341 @@ +Apache Storm +Copyright 2015 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +This product includes software developed by Yahoo! Inc. (www.yahoo.com) +Copyright (c) 2012-2015 Yahoo! Inc. + +======================== + +ApacheDS I18n +Copyright 2003-2013 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +ApacheDS Protocol Kerberos Codec +Copyright 2003-2013 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache Directory API ASN.1 API +Copyright 2003-2013 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache Directory LDAP API Utilities +Copyright 2003-2013 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache Yetus - Audience Annotations +Copyright 2015-2017 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + +Apache Calcite Avatica +Copyright 2012-2017 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache Calcite Avatica Metrics +Copyright 2012-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache Avro +Copyright 2009-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Calcite Core +Copyright 2012-2017 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Calcite Druid +Copyright 2012-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Calcite Linq4j +Copyright 2012-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Calcite Linq4j +Copyright 2012-2017 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + +Java ClassMate library was originally written by Tatu Saloranta (tatu.saloranta@iki.fi) + +Other developers who have contributed code are: + +* Brian Langel + + +======================== + + +Curator Client +Copyright 2011-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
+ + + +======================== + + +Curator Framework +Copyright 2011-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Curator Recipes +Copyright 2011-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + +========================================================================= +== NOTICE file corresponding to section 4(d) of the Apache License, +== Version 2.0, in this case for the Apache Derby distribution. +== +== DO NOT EDIT THIS FILE DIRECTLY. IT IS GENERATED +== BY THE buildnotice TARGET IN THE TOP LEVEL build.xml FILE. +== +========================================================================= + +Apache Derby +Copyright 2004-2014 The Apache Software Foundation + +This product includes software developed by +The Apache Software Foundation (http://www.apache.org/). + + +========================================================================= + +Portions of Derby were originally developed by +International Business Machines Corporation and are +licensed to the Apache Software Foundation under the +"Software Grant and Corporate Contribution License Agreement", +informally known as the "Derby CLA". +The following copyright notice(s) were affixed to portions of the code +with which this file is now or was at one time distributed +and are placed here unaltered. + +(C) Copyright 1997,2004 International Business Machines Corporation. All rights reserved. + +(C) Copyright IBM Corp. 2003. + + +========================================================================= + + +The portion of the functionTests under 'nist' was originally +developed by the National Institute of Standards and Technology (NIST), +an agency of the United States Department of Commerce, and adapted by +International Business Machines Corporation in accordance with the NIST +Software Acknowledgment and Redistribution document at +http://www.itl.nist.gov/div897/ctg/sql_form.htm + + + +========================================================================= + + +The JDBC apis for small devices and JDBC3 (under java/stubs/jsr169 and +java/stubs/jdbc3) were produced by trimming sources supplied by the +Apache Harmony project. In addition, the Harmony SerialBlob and +SerialClob implementations are used. The following notice covers the Harmony sources: + +Portions of Harmony were originally developed by +Intel Corporation and are licensed to the Apache Software +Foundation under the "Software Grant and Corporate Contribution +License Agreement", informally known as the "Intel Harmony CLA". + + +========================================================================= + + +The Derby build relies on source files supplied by the Apache Felix +project. The following notice covers the Felix files: + + Apache Felix Main + Copyright 2008 The Apache Software Foundation + + + I. Included Software + + This product includes software developed at + The Apache Software Foundation (http://www.apache.org/). + Licensed under the Apache License 2.0. + + This product includes software developed at + The OSGi Alliance (http://www.osgi.org/). + Copyright (c) OSGi Alliance (2000, 2007). + Licensed under the Apache License 2.0. + + This product includes software from http://kxml.sourceforge.net. + Copyright (c) 2002,2003, Stefan Haustein, Oberhausen, Rhld., Germany. + Licensed under BSD License. 
+ + II. Used Software + + This product uses software developed at + The OSGi Alliance (http://www.osgi.org/). + Copyright (c) OSGi Alliance (2000, 2007). + Licensed under the Apache License 2.0. + + + III. License Summary + - Apache License 2.0 + - BSD License + + +========================================================================= + + +The Derby build relies on jar files supplied by the Apache Xalan +project. The following notice covers the Xalan jar files: + + ========================================================================= + == NOTICE file corresponding to section 4(d) of the Apache License, == + == Version 2.0, in this case for the Apache Xalan Java distribution. == + ========================================================================= + + Apache Xalan (Xalan XSLT processor) + Copyright 1999-2006 The Apache Software Foundation + + Apache Xalan (Xalan serializer) + Copyright 1999-2006 The Apache Software Foundation + + This product includes software developed at + The Apache Software Foundation (http://www.apache.org/). + + ========================================================================= + Portions of this software was originally based on the following: + - software copyright (c) 1999-2002, Lotus Development Corporation., + http://www.lotus.com. + - software copyright (c) 2001-2002, Sun Microsystems., + http://www.sun.com. + - software copyright (c) 2003, IBM Corporation., + http://www.ibm.com. + + ========================================================================= + The binary distribution package (ie. jars, samples and documentation) of + this product includes software developed by the following: + + - The Apache Software Foundation + - Xerces Java - see LICENSE.txt + - JAXP 1.3 APIs - see LICENSE.txt + - Bytecode Engineering Library - see LICENSE.txt + - Regular Expression - see LICENSE.txt + + - Scott Hudson, Frank Flannery, C. Scott Ananian + - CUP Parser Generator runtime (javacup\runtime) - see LICENSE.txt + + ========================================================================= + The source distribution package (ie. all source and tools required to build + Xalan Java) of this product includes software developed by the following: + + - The Apache Software Foundation + - Xerces Java - see LICENSE.txt + - JAXP 1.3 APIs - see LICENSE.txt + - Bytecode Engineering Library - see LICENSE.txt + - Regular Expression - see LICENSE.txt + - Ant - see LICENSE.txt + - Stylebook doc tool - see LICENSE.txt + + - Elliot Joel Berk and C. Scott Ananian + - Lexical Analyzer Generator (JLex) - see LICENSE.txt + + ========================================================================= + Apache Xerces Java + Copyright 1999-2006 The Apache Software Foundation + + This product includes software developed at + The Apache Software Foundation (http://www.apache.org/). + + Portions of Apache Xerces Java in xercesImpl.jar and xml-apis.jar + were originally based on the following: + - software copyright (c) 1999, IBM Corporation., http://www.ibm.com. + - software copyright (c) 1999, Sun Microsystems., http://www.sun.com. + - voluntary contributions made by Paul Eng on behalf of the + Apache Software Foundation that were originally developed at iClick, Inc., + software copyright (c) 1999. + + ========================================================================= + Apache xml-commons xml-apis (redistribution of xml-apis.jar) + + Apache XML Commons + Copyright 2001-2003,2006 The Apache Software Foundation. 
+ + This product includes software developed at + The Apache Software Foundation (http://www.apache.org/). + + Portions of this software were originally based on the following: + - software copyright (c) 1999, IBM Corporation., http://www.ibm.com. + - software copyright (c) 1999, Sun Microsystems., http://www.sun.com. + - software copyright (c) 2000 World Wide Web Consortium, http://www.w3.org + + +======================== + +Copyright 2016 Josh Elser + +======================== + +Apache Geronimo +Copyright 2003-2008 The Apache Software Foundation + +This product includes software developed by +The Apache Software Foundation (http://www.apache.org/). + + +======================== + + +Java Authentication SPI for Containers +Copyright 2003-2009 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + +Apache Geronimo +Copyright 2003-2008 The Apache Software Foundation + +This product includes software developed by +The Apache Software Foundation (http://www.apache.org/). + + +======================== + +========================================================================= +== NOTICE file corresponding to the section 4 d of == +== the Apache License, Version 2.0, == +== in this case for the Groovy Language distribution. == +========================================================================= + +Apache Groovy +Copyright 2003-2015 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +It includes the following other software: + +Antlr 2 (http://www.antlr2.org/) +ASM (http://asm.ow2.org/) +GPars (http://www.gpars.org/) +Hamcrest (https://github.com/hamcrest/JavaHamcrest) +JCommander (http://jcommander.org/) +Openbeans (https://code.google.com/p/openbeans/) +QDox (https://github.com/paul-hammant/qdox) +TestNG (http://testng.org/) +XMLPull (http://www.xmlpull.org/) +XStream (https://x-stream.github.io/) + +For the following files in the groovy-jsr223 component: + src/main/org/codehaus/groovy/jsr223/GroovyCompiledScript.java + src/main/org/codehaus/groovy/jsr223/GroovyScriptEngineFactory.java + src/main/org/codehaus/groovy/jsr223/GroovyScriptEngineImpl.java + +Use the JSR223 License listed in the LICENSE file. + +For licenses see the LICENSE file. + +If any software distributed with Apache Groovy does not have an Apache 2 +License, its license is explicitly listed in the LICENSE file. + +======================== + + +Google Guice - Core Library +Copyright 2006-2011 Google, Inc. + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Google Guice - Extensions - AssistedInject +Copyright 2006-2011 Google, Inc. + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Google Guice - Extensions - Servlet +Copyright 2006-2011 Google, Inc. + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. 
+ +## Licensing + +Jackson core and extension components may be licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. + +======================== + + +Apache HBase - Annotations +Copyright 2007-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache HBase - Client +Copyright 2007-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache HBase - Common +Copyright 2007-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache HBase - Hadoop Two Compatibility +Copyright 2007-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache HBase - Hadoop Compatibility +Copyright 2007-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache HBase - HTTP +Copyright 2007-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache HBase - Metrics Implementation +Copyright 2007-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache HBase - Metrics API +Copyright 2007-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache HBase - Procedure +Copyright 2007-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache HBase - Protocol +Copyright 2007-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache HBase - Shaded Protocol +Copyright 2007-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache HBase - Replication +Copyright 2007-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache HBase - Server +Copyright 2007-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
+ + + +======================== + + +Apache Commons Collections +Copyright 2001-2015 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +Apache HBase Relocated (Shaded) Third-party Miscellaneous Libs +Copyright 2017-2018 The Apache Software Foundation + +Apache Commons CLI +Copyright 2001-2017 The Apache Software Foundation + + +======================== + + +Apache HBase Relocated (Shaded) Netty Libs +Copyright 2017-2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache HBase Patched & Relocated (Shaded) Protobuf +Copyright 2017-2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +--- + +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +======================== + + +Apache HBase - Zookeeper +Copyright 2007-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive CLI +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive Common +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive Query Language +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
+ + + +======================== + + +Hive HCatalog Core +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive HCatalog Server Extensions +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive HCatalog Streaming +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive Llap Client +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive Llap Common +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive Llap Common +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive Llap Server +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive Llap Tez +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive Metastore +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive Serde +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive Service +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive Service RPC +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive Shims 0.23 +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive Shims +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive Shims Common +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive Shims Scheduler +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive Storage API +Copyright 2017 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
+ + + +======================== + + +Hive Vector-Code-Gen Utilities +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Hive HCatalog Webhcat Java Client +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +htrace-core4 +Copyright 2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache HttpClient +Copyright 1999-2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache HttpCore +Copyright 2005-2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + +Apache Ivy (TM) +Copyright 2007-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +Portions of Ivy were originally developed at +Jayasoft SARL (http://www.jayasoft.fr/) +and are licensed to the Apache Software Foundation under the +"Software Grant License Agreement" + +SSH and SFTP support is provided by the JCraft JSch package, +which is open source software, available under +the terms of a BSD style license. +The original software and related information is available +at http://www.jcraft.com/jsch/. + +======================== + +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may be licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. + +======================== + +This product currently only contains code developed by authors +of specific components, as identified by the source code files; +if such notes are missing files have been created by +Tatu Saloranta. + +For additional credits (generally to people who reported problems) +see CREDITS file. + +======================== + +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may be licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). 
+ +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. + +======================== + +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may be licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. + +======================== + +This product currently only contains code developed by authors +of specific components, as identified by the source code files; +if such notes are missing files have been created by +Tatu Saloranta. + +For additional credits (generally to people who reported problems) +see CREDITS file. + +======================== + +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may be licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. + +======================== + +This product currently only contains code developed by authors +of specific components, as identified by the source code files; +if such notes are missing files have been created by +Tatu Saloranta. + +For additional credits (generally to people who reported problems) +see CREDITS file. + +======================== + +This product currently only contains code developed by authors +of specific components, as identified by the source code files; +if such notes are missing files have been created by +Tatu Saloranta. + +For additional credits (generally to people who reported problems) +see CREDITS file. + +======================== + +Apache Geronimo +Copyright 2003-2008 The Apache Software Foundation + +This product includes software developed by +The Apache Software Foundation (http://www.apache.org/). + + +======================== + +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). +======================== + + +Apache Log4j 1.x Compatibility API +Copyright 1999-2016 Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
+ + + +======================== + + +Apache Log4j API +Copyright 1999-2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + +Apache Log4j Core +Copyright 1999-2012 Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +ResolverUtil.java +Copyright 2005-2006 Tim Fennell +======================== + + +Apache Log4j SLF4J Binding +Copyright 1999-2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Maven Aether Provider +Copyright 2001-2013 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Maven Model +Copyright 2001-2013 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Maven Model Builder +Copyright 2001-2013 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Maven Repository Metadata Model +Copyright 2001-2013 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + +// ------------------------------------------------------------------ +// NOTICE file corresponding to the section 4d of The Apache License, +// Version 2.0, in this case for Objenesis +// ------------------------------------------------------------------ + +Objenesis +Copyright 2006-2013 Joe Walnes, Henri Tremblay, Leonardo Mesquita + + + +======================== + + +ORC Core +Copyright 2013-2017 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Apache Parquet Hadoop Bundle +Copyright 2015 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + +This product includes software developed by the Indiana University + Extreme! Lab (http://www.extreme.indiana.edu/). + +This product includes software developed by +The Apache Software Foundation (http://www.apache.org/). + +This product includes software developed by +ThoughtWorks (http://www.thoughtworks.com). + +This product includes software developed by +javolution (http://javolution.org/). + +This product includes software developed by +Rome (https://rome.dev.java.net/). +======================== + + +Shaded Deps for Storm Client +Copyright 2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Sisu Guice - Core Library +Copyright 2006-2011 Google, Inc. + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + +======================== + + +Slider Core +Copyright 2014-2015 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
+ + +======================== + +Apache Velocity + +Copyright (C) 2000-2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + +======================== + diff --git a/README.markdown b/README.markdown index fc152d47d50..d455e0ef0b0 100644 --- a/README.markdown +++ b/README.markdown @@ -1,46 +1,55 @@ -Storm is a distributed realtime computation system. Similar to how Hadoop provides a set of general primitives for doing batch processing, Storm provides a set of general primitives for doing realtime computation. Storm is simple, can be used with any programming language, [is used by many companies](https://github.com/nathanmarz/storm/wiki/Powered-By), and is a lot of fun to use! +Master Branch: +[![Java CI with Maven](https://github.com/apache/storm/actions/workflows/maven.yaml/badge.svg)](https://github.com/apache/storm/actions/workflows/maven.yaml) +[![Maven Version](https://maven-badges.herokuapp.com/maven-central/org.apache.storm/storm-core/badge.svg)](https://search.maven.org/#search|gav|1|g:"org.apache.storm"%20AND%20a:"storm-core") + +Storm is a distributed realtime computation system. Similar to how Hadoop provides a set of general primitives for doing batch processing, Storm provides a set of general primitives for doing realtime computation. Storm is simple, can be used with any programming language, [is used by many companies](https://storm.apache.org/Powered-By.html), and is a lot of fun to use! -The [Rationale page](https://github.com/nathanmarz/storm/wiki/Rationale) on the wiki explains what Storm is and why it was built. [This presentation](http://vimeo.com/40972420) is also a good introduction to the project. +The [Rationale page](https://storm.apache.org/documentation/Rationale.html) explains what Storm is and why it was built. [This presentation](https://vimeo.com/40972420) is also a good introduction to the project. -Storm has a website at [storm-project.net](http://storm-project.net). Follow [@stormprocessor](https://twitter.com/stormprocessor) on Twitter for updates on the project. +Storm has a website at [storm.apache.org](https://storm.apache.org). ## Documentation -Documentation and tutorials can be found on the [Storm wiki](http://github.com/nathanmarz/storm/wiki). +Documentation and tutorials can be found on the [Storm website](https://storm.apache.org/documentation/Home.html). Developers and contributors should also take a look at our [Developer documentation](DEVELOPER.md). - + ## Getting help -__NOTE:__ The google groups account storm-user@googlegroups.com is now officially deprecated in favor of the Apache-hosted user/dev mailing lists. - ### Storm Users -Storm users should send messages and subscribe to [user@storm.incubator.apache.org](mailto:user@storm.incubator.apache.org). +Storm users should send messages and subscribe to [user@storm.apache.org](mailto:user@storm.apache.org). -You can subscribe to this list by sending an email to [user-subscribe@storm.incubator.apache.org](mailto:user-subscribe@storm.incubator.apache.org). Likewise, you can cancel a subscription by sending an email to [user-unsubscribe@storm.incubator.apache.org](mailto:user-unsubscribe@storm.incubator.apache.org). +You can subscribe to this list by sending an email to [user-subscribe@storm.apache.org](mailto:user-subscribe@storm.apache.org). Likewise, you can cancel a subscription by sending an email to [user-unsubscribe@storm.apache.org](mailto:user-unsubscribe@storm.apache.org). 
-You can also [browse the archives of the storm-user mailing list](http://mail-archives.apache.org/mod_mbox/incubator-storm-user/).
+You can also [browse the archives of the storm-user mailing list](https://mail-archives.apache.org/mod_mbox/storm-user/).

### Storm Developers

-Storm developers should send messages and subscribe to [dev@storm.incubator.apache.org](mailto:dev@storm.incubator.apache.org).
+Storm developers should send messages and subscribe to [dev@storm.apache.org](mailto:dev@storm.apache.org).

-You can subscribe to this list by sending an email to [dev-subscribe@storm.incubator.apache.org](mailto:dev-subscribe@storm.incubator.apache.org). Likewise, you can cancel a subscription by sending an email to [dev-unsubscribe@storm.incubator.apache.org](mailto:dev-unsubscribe@storm.incubator.apache.org).
+You can subscribe to this list by sending an email to [dev-subscribe@storm.apache.org](mailto:dev-subscribe@storm.apache.org). Likewise, you can cancel a subscription by sending an email to [dev-unsubscribe@storm.apache.org](mailto:dev-unsubscribe@storm.apache.org).

-You can also [browse the archives of the storm-dev mailing list](http://mail-archives.apache.org/mod_mbox/incubator-storm-dev/).
+You can also [browse the archives of the storm-dev mailing list](https://mail-archives.apache.org/mod_mbox/storm-dev/).

-### Which list should I send/subscribe to?
-If you are using a pre-built binary distribution of Storm, then chances are you should send questions, comments, storm-related announcements, etc. to [user@storm.apache.incubator.org](user@storm.apache.incubator.org).
+Storm developers who want to track issues should subscribe to [issues@storm.apache.org](mailto:issues@storm.apache.org).

-If you are building storm from source, developing new features, or otherwise hacking storm source code, then [dev@storm.incubator.apache.org](dev@storm.incubator.apache.org) is more appropriate.
+You can subscribe to this list by sending an email to [issues-subscribe@storm.apache.org](mailto:issues-subscribe@storm.apache.org). Likewise, you can cancel a subscription by sending an email to [issues-unsubscribe@storm.apache.org](mailto:issues-unsubscribe@storm.apache.org).

-### What will happen with storm-user@googlegroups.com?
-All existing messages will remain archived there, and can be accessed/searched [here](https://groups.google.com/forum/#!forum/storm-user).
+You can view the archives of the mailing list [here](https://mail-archives.apache.org/mod_mbox/storm-issues/).
+
+### Issue tracker
+If you want to report a bug, request a feature, or propose an idea, please use [GitHub Issues](https://github.com/apache/storm/issues).
+If you do not have an account, you need to create one.

-New messages sent to storm-user@googlegroups.com will either be rejected/bounced or replied to with a message to direct the email to the appropriate Apache-hosted group.
+### Which list should I send/subscribe to?
+If you are using a pre-built binary distribution of Storm, then you should send questions, comments, storm-related announcements, etc. to [user@storm.apache.org](mailto:user@storm.apache.org).
+
+If you are building storm from source, developing new features, or otherwise hacking storm source code, then [dev@storm.apache.org](mailto:dev@storm.apache.org) is more appropriate.

-### IRC
-You can also come to the #storm-user room on [freenode](http://freenode.net/). You can usually find a Storm developer there to help you out.
+If you are a committer, a PMC member, or a contributor who wants to follow and participate in the development of Storm, then you should also subscribe to [issues@storm.apache.org](mailto:issues@storm.apache.org) in addition to [dev@storm.apache.org](mailto:dev@storm.apache.org).
+
+### What happened with storm-user@googlegroups.com?
+All existing messages will remain archived there, and can be accessed/searched [here](https://groups.google.com/forum/#!forum/storm-user).

## License

@@ -52,7 +61,7 @@ to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

- http://www.apache.org/licenses/LICENSE-2.0
+ https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
@@ -61,65 +70,8 @@ KIND, either express or implied. See the License for the
specific language governing permissions and
limitations under the License.

-
-## Project lead
-
-* Nathan Marz ([@nathanmarz](http://twitter.com/nathanmarz))
-
-## Committers
-
-* James Xu ([@xumingming](https://github.com/xumingming))
-* Jason Jackson ([@jason_j](http://twitter.com/jason_j))
-* Andy Feng ([@anfeng](https://github.com/anfeng))
-* Flip Kromer ([@mrflip](https://github.com/mrflip))
-* David Lao ([@davidlao2k](https://github.com/davidlao2k))
-* P. Taylor Goetz ([@ptgoetz](https://github.com/ptgoetz))
-* Derek Dagit ([@d2r](https://github.com/d2r))
-* Robert Evans ([@revans2](https://github.com/revans2))
-* Michael G. Noll ([@miguno](https://github.com/miguno))
-
-## Contributors
-
-* Christopher Bertels ([@bakkdoor](http://twitter.com/bakkdoor))
-* Michael Montano ([@michaelmontano](http://twitter.com/michaelmontano))
-* Dennis Zhuang ([@killme2008](https://github.com/killme2008))
-* Trevor Smith ([@trevorsummerssmith](https://github.com/trevorsummerssmith))
-* Ben Hughes ([@schleyfox](https://github.com/schleyfox))
-* Alexey Kachayev ([@kachayev](https://github.com/kachayev))
-* Haitao Yao ([@haitaoyao](https://github.com/haitaoyao))
-* Dan Dillinger ([@ddillinger](https://github.com/ddillinger))
-* Kang Xiao ([@xiaokang](https://github.com/xiaokang))
-* Gabriel Grant ([@gabrielgrant](https://github.com/gabrielgrant))
-* Travis Wellman ([@travisfw](https://github.com/travisfw))
-* Kasper Madsen ([@KasperMadsen](https://github.com/KasperMadsen))
-* Michael Cetrulo ([@git2samus](https://github.com/git2samus))
-* Thomas Jack ([@tomo](https://github.com/tomo))
-* Nicolas Yzet ([@nicoo](https://github.com/nicoo))
-* Fabian Neumann ([@hellp](https://github.com/hellp))
-* Soren Macbeth ([@sorenmacbeth](https://github.com/sorenmacbeth))
-* Ashley Brown ([@ashleywbrown](https://github.com/ashleywbrown))
-* Guanpeng Xu ([@herberteuler](https://github.com/herberteuler))
-* Vinod Chandru ([@vinodc](https://github.com/vinodc))
-* Martin Kleppmann ([@ept](https://github.com/ept))
-* Evan Chan ([@velvia](https://github.com/velvia))
-* Sjoerd Mulder ([@sjoerdmulder](https://github.com/sjoerdmulder))
-* Yuta Okamoto ([@okapies](https://github.com/okapies))
-* Barry Hart ([@barrywhart](https://github.com/barrywhart))
-* Sergey Lukjanov ([@Frostman](https://github.com/Frostman))
-* Ross Feinstein ([@rnfein](https://github.com/rnfein))
-* Junichiro Takagi ([@tjun](https://github.com/tjun))
-* Bryan Peterson ([@Lazyshot](https://github.com/Lazyshot))
-* Sam Ritchie ([@sritchie](https://github.com/sritchie))
-* Stuart Anderson ([@emblem](https://github.com/emblem))
-* Lorcan Coyle ([@lorcan](https://github.com/lorcan))
-* Andrew Olson ([@noslowerdna](https://github.com/noslowerdna))
-* Gavin Li ([@lyogavin](https://github.com/lyogavin))
-* Tudor Scurtu ([@tscurtu](https://github.com/tscurtu))
-* Homer Strong ([@strongh](https://github.com/strongh))
-* Sean Melody ([@srmelody](https://github.com/srmelody))
-* Jake Donham ([@jaked](https://github.com/jaked))
-* Ankit Toshniwal ([@ankitoshniwal](https://github.com/ankitoshniwal))
+The LICENSE and NOTICE files cover the source distributions. The LICENSE-binary and NOTICE-binary files cover the binary distributions. The DEPENDENCY-LICENSES file lists the licenses of all dependencies of Storm, including those not packaged in the source or binary distributions, such as dependencies of optional connector modules.

## Acknowledgements

-YourKit is kindly supporting open source projects with its full-featured Java Profiler. YourKit, LLC is the creator of innovative and intelligent tools for profiling Java and .NET applications. Take a look at YourKit's leading software products: [YourKit Java Profiler](http://www.yourkit.com/java/profiler/index.jsp) and [YourKit .NET Profiler](http://www.yourkit.com/.net/profiler/index.jsp).
+YourKit is kindly supporting open source projects with its full-featured Java Profiler. YourKit, LLC is the creator of innovative and intelligent tools for profiling Java and .NET applications. Take a look at YourKit's leading software products: [YourKit Java Profiler](https://www.yourkit.com/java/profiler/index.jsp) and [YourKit .NET Profiler](https://www.yourkit.com/.net/profiler/index.jsp).
diff --git a/RELEASING.md b/RELEASING.md
new file mode 100644
index 00000000000..0e9edb0999d
--- /dev/null
+++ b/RELEASING.md
@@ -0,0 +1,293 @@
+# Release
+
+This document includes information about the Storm release process.
+
+---
+
+# Release Policy
+
+Apache Storm follows the basic idea of [Semantic Versioning](https://semver.org/). Given a version number MAJOR.MINOR.PATCH, increment the:
+ 1. MAJOR version when you make incompatible API changes,
+ 2. MINOR version when you add functionality in a backwards compatible manner, and
+ 3. PATCH version when you make backwards compatible bug fixes.
+
+# Release process
+
+## Preparation
+
+- We strongly encourage you to read the [Apache release signing page](http://www.apache.org/dev/release-signing.html), the [release distribution page](http://www.apache.org/dev/release-distribution.html#sigs-and-sums), as well as the [release publishing](http://www.apache.org/dev/release-publishing), [release policy](http://www.apache.org/legal/release-policy.html) and [Maven publishing](https://infra.apache.org/publishing-maven-artifacts.html) pages. ASF has common guidelines that apply to all projects.
+- Ensure you can log in to http://repository.apache.org. You should use your Apache ID username and password.
+- Install an SVN client, and ensure you can access the https://dist.apache.org/repos/dist/dev/storm/ and https://dist.apache.org/repos/dist/release/storm/ repositories. You should be able to access these with your Apache ID username and password.
+- During the release phase, artifacts will be uploaded to https://repository.apache.org. This means Maven needs to know your LDAP credentials. It is recommended that you use Maven's mechanism for [password encryption](https://maven.apache.org/guides/mini/guide-encryption.html). Please configure this in your `${user.home}/.m2/settings.xml`:
+
+```
+<settings>
+  ...
+  <servers>
+    <server>
+      <id>apache.snapshots.https</id>
+      <username><!-- YOUR APACHE LDAP USERNAME --></username>
+      <password><!-- YOUR ENCRYPTED APACHE LDAP PASSWORD --></password>
+    </server>
+    <server>
+      <id>apache.releases.https</id>
+      <username><!-- YOUR APACHE LDAP USERNAME --></username>
+      <password><!-- YOUR ENCRYPTED APACHE LDAP PASSWORD --></password>
+    </server>
+  </servers>
+  ...
+</settings>
+```
+- Ensure you have a signed GPG key, and that the GPG key is listed in the Storm KEYS file at https://dist.apache.org/repos/dist/release/storm/KEYS. The key should be hooked into the Apache web of trust (https://keyserver.ubuntu.com for example).
+  - set up the key as the default one to be used during the signing operations that the GPG Maven plugin will request (your OS GPG agent can do this)
+- Compile environment:
+  - some tests currently rely on the following packages being available locally:
+    - NodeJS
+    - Python3
+  - some tests will require Docker to be running since they will create/launch containers (make sure `/var/run/docker.sock` has the correct permissions, otherwise you might see the error `Could not find a valid Docker environment`)
+
+
+
+If you are setting up a new MINOR version release, create a new branch based on the `master` branch, e.g. `2.8.x-branch`. Then on the master branch, set the version to the next MINOR version (with SNAPSHOT), e.g. `mvn versions:set -DnewVersion=2.9.0-SNAPSHOT -P dist,rat,externals,examples`.
+In this way, you create a new release line from which you can then create PATCH version releases, e.g. `2.8.1`.
+
+## Setting up a vote
+
+1. Check out the branch to be released.
+
+2. Run `mvn release:prepare -P dist,rat,externals,examples` followed by `mvn release:perform -P dist,rat,externals,examples`.
+This will create all the artifacts that will eventually be available in maven central. This step may seem simple,
+but a lot can go wrong (mainly flaky tests). Note that this will create and push two commits with the commit message
+starting with "[maven-release-plugin]" and it will also create and publish a git tag, e.g. `v2.8.1`. Note: the full build can take up to 30 minutes to complete.
+
+3. Once you get a successful maven release, a “staging repository” will be created at http://repository.apache.org
+in the “open” state, meaning it is still writable. You will need to close it, making it read-only. You can find more
+information on this step [here](https://infra.apache.org/publishing-maven-artifacts.html).
+
+4. Check out the git tag that was published by Step 2 above, e.g. `git checkout tags/v2.8.1 -b v2.8.1`.
+Then build it with `mvn clean install -DskipTests`. Run `mvn package` in `storm-dist/binary` and `storm-dist/source`
+to create the actual distributions.
+
+5. Generate checksums for the *.tar.gz and *.zip distribution files, e.g.
+```bash
+pushd storm-dist/source/target
+sha512sum apache-storm-2.8.1-src.zip > apache-storm-2.8.1-src.zip.sha512
+sha512sum apache-storm-2.8.1-src.tar.gz > apache-storm-2.8.1-src.tar.gz.sha512
+popd
+
+pushd storm-dist/binary/final-package/target
+sha512sum apache-storm-2.8.1.zip > apache-storm-2.8.1.zip.sha512
+sha512sum apache-storm-2.8.1.tar.gz > apache-storm-2.8.1.tar.gz.sha512
+popd
+```
+
+6. Create a directory in the dist svn repo for the release candidate: https://dist.apache.org/repos/dist/dev/storm/apache-storm-x.x.x-rcx
+
+7. Before generating the release notes, please double check that all merged pull requests for the version being released are assigned the milestone in question. They won't be placed in the release notes otherwise.
+
+8. Run `dev-tools/release_notes.py` for the release version, piping the output to a RELEASE_NOTES.html file. Move that file to the svn release directory, sign it, and generate checksums, e.g.
+```bash
+export GITHUB_TOKEN=MY_PERSONAL_ACCESS_TOKEN_FOR_GI
+python3 dev-tools/release_notes.py > RELEASE_NOTES.html
+gpg --armor --output RELEASE_NOTES.html.asc --detach-sig RELEASE_NOTES.html
+sha512sum RELEASE_NOTES.html > RELEASE_NOTES.html.sha512
+```
+
+To create a personal access token:
+
+- Go to your GitHub account settings.
+- Navigate to **Developer Settings** > **Personal Access Tokens** > **Tokens (classic)**.
+- Generate a token with the `public_repo` scope.
+
+To obtain the ID of a GitHub milestone:
+- Visit the [milestone overview](https://github.com/apache/storm/milestones).
+- Click on the milestone you want to create release notes for.
+- Look at the URL in your browser. It will look like this: `https://github.com/apache/storm/milestone/40`, where the last number is the milestone ID.
+
+9. Move the release files from steps 4, 5, and 8 to the svn directory from Step 6. Example of the set of files:
+   ```
+   apache-storm-2.8.3-src.tar.gz        apache-storm-2.8.3-src.zip        apache-storm-2.8.3.tar.gz        apache-storm-2.8.3.zip        RELEASE_NOTES.html
+   apache-storm-2.8.3-src.tar.gz.asc    apache-storm-2.8.3-src.zip.asc    apache-storm-2.8.3.tar.gz.asc    apache-storm-2.8.3.zip.asc    RELEASE_NOTES.html.asc
+   apache-storm-2.8.3-src.tar.gz.sha512 apache-storm-2.8.3-src.zip.sha512 apache-storm-2.8.3.tar.gz.sha512 apache-storm-2.8.3.zip.sha512 RELEASE_NOTES.html.sha512
+   ```
+
+   Add and commit the files. This makes them available in the Apache staging repo.
+
+10. Start the VOTE thread. The vote should follow the [ASF voting process](https://www.apache.org/foundation/voting.html).
+Sample template sent to dev@storm.apache.org:
+
+```
+Subject: [VOTE] Release Apache Storm [VERSION] (rcN)
+
+Hi folks,
+
+I have posted the [Nth] release candidate for the Apache Storm [VERSION] release and it is ready for testing.
+
+The Nexus staging repository is here:
+ https://repository.apache.org/content/repositories/orgapachestorm-[REPO_NUM]
+
+Storm Source and Binary Release with sha512 signature files are here:
+ https://dist.apache.org/repos/dist/dev/storm/apache-storm-[VERSION]-rcN/
+
+The release artifacts are signed with the following key:
+ https://keyserver.ubuntu.com/pks/lookup?op=index&fingerprint=on&search=[KEY]
+which is also in this file: https://www.apache.org/dist/storm/KEYS
+
+The release was made from the Apache Storm [VERSION] tag at:
+ https://github.com/apache/storm/tree/v[VERSION]
+
+Full list of changes in this release:
+ https://dist.apache.org/repos/dist/dev/storm/apache-storm-[VERSION]-rcN/RELEASE_NOTES.html
+
+To use it in a maven build set the version for Storm to [VERSION] and add the following URL to your settings.xml file:
+https://repository.apache.org/content/repositories/orgapachestorm-[REPO_NUM]
+
+The release was made using the Storm release process, documented on the GitHub repository:
+https://github.com/apache/storm/blob/master/RELEASING.md
+
+Please vote on releasing these packages as Apache Storm [VERSION]. The vote is open for at least the next 72 hours.
+"How to vote" is described here: https://github.com/apache/storm/blob/master/RELEASING.md#how-to-vote-on-a-release-candidate
+When voting, please list the actions taken to verify the release.
+
+Only votes from the Storm PMC are binding, but everyone is welcome to check the release candidate and vote.
+The vote passes if at least three binding +1 votes are cast.
+
+[ ] +1 Release this package as Apache Storm [VERSION]
+[ ] 0 No opinion
+[ ] -1 Do not release this package because...
+
+Thanks to everyone who contributed to this release.
+
+Thanks!
+[Release Manager Name]
+```
+
+
+## Releasing if the vote succeeds
+
+0. Announce the results. Use the following template:
+
+```
+Subject: [VOTE][RESULT] Storm [VERSION] Release Candidate [N]
+
+Dear Community,
+
+The voting for releasing Apache Storm [VERSION] Release Candidate [N] has passed with # +1 votes (# binding) and # +0 or -1 votes.
++1 votes:
+* ###### / binding
+* ############
+
+The vote thread can be found at:
+https://lists.apache.org/thread.html/##############
+
+Thanks everyone for taking the time to review and vote for the release!
+We will continue the rest of the release process and send out the announcement email in the coming days.
+
+Thanks to everyone who contributed to this release.
+
+[RELEASE MANAGER NAME]
+```
+
+1. `svn mv https://dist.apache.org/repos/dist/dev/storm/apache-storm-x.x.x-rcx https://dist.apache.org/repos/dist/release/storm/apache-storm-x.x.x`. This will make the release artifacts available on dist.apache.org and the artifacts will start replicating to mirrors.
+
+2. Go to http://repository.apache.org and release the staging repository.
+
+3. Wait at least 24 hours for the mirrors to catch up.
+
+4. Check out the [storm-site](https://github.com/apache/storm-site) repository, and follow the README to generate release specific documentation for
+the site. Compose a new blog post announcement for the new release. Update the downloads page. Finally, commit and push
+the site as described in the storm-site README to publish the site.
+
+5. Update `doap_Storm.rdf` with the new release version.
+
+6. Announce the new release to dev@storm.apache.org, user@storm.apache.org, and announce@apache.org. You will need to use your @apache.org email to do this.
+
+7. Delete any outdated releases from the https://dist.apache.org/repos/dist/release/storm/ repository. See [when to archive](http://www.apache.org/legal/release-policy.html#when-to-archive).
+
+8. Delete any outdated releases from the storm-site releases directory, and republish the site.
+
+9. Create a release on [GitHub](https://github.com/apache/storm/releases). Generate the release notes with the GitHub tooling.
+
+10. Post, promote, celebrate. ;) The announcement email can be sent to announce@apache.org using the following template:
+
+```
+Subject: [ANNOUNCE] Apache Storm [VERSION] Released
+
+The Apache Storm community is pleased to announce the release of Apache
+Storm version [VERSION].
+
+Apache Storm is a distributed, fault-tolerant, and high-performance
+realtime computation system that provides strong guarantees on the
+processing of data. You can read more about Apache Storm on the project
+website:
+
+https://storm.apache.org/
+
+Downloads of source and binary distributions are listed in our download
+section:
+
+https://storm.apache.org/downloads.html
+
+You can read more about this release in the following blog post:
+
+https://storm.apache.org/[YEAR]/[MONTH]/[DAY]/storm[VERSION]-released.html
+
+Distribution artifacts are available in Maven Central at the following
+coordinates:
+
+groupId: org.apache.storm
+artifactId: storm-{component}
+version: [VERSION]
+
+The full list of changes is available here [1]. Please let us know [2] if
+you encounter any problems.
+
+Regards,
+The Apache Storm Team
+
+[1] https://downloads.apache.org/storm/apache-storm-[VERSION]/RELEASE_NOTES.html
+[2] https://github.com/apache/storm/issues
+```
+
+## Cleaning up if the vote fails
+
+1. Send an email to dev@storm.apache.org.
+
+2. Go to http://repository.apache.org and drop the staging repository.
+3. Delete the staged distribution files from https://dist.apache.org/repos/dist/dev/storm/.
+
+4. Delete the git tag.
+
+5. Send a [VOTE][CANCELED] message using the following format:
+
+```
+Subject: [VOTE][CANCELED] Storm [VERSION] Release Candidate [N]
+
+The Storm [VERSION] Release Candidate [N] (https://dist.apache.org/repos/dist/dev/storm/apache-storm-[VERSION]-rcN/) has been canceled.
+A new vote request will be sent out for RC[N+1] with further updates.
+
+[RELEASE MANAGER NAME]
+```
+
+# How to vote on a release candidate
+
+We encourage everyone to review and vote on a release candidate to make an Apache Storm release more reliable and trustworthy.
+
+Below is a checklist one can follow to review a release candidate.
+Please note this list is not exhaustive and only includes some of the common steps. Feel free to add your own tests.
+
+1. Verify files such as *.asc, *.sha512; some scripts are available under `dev-tools/rc` to help with this;
+2. Build the Apache Storm source code, run the unit tests, and create an Apache Storm distribution;
+3. Set up a standalone cluster using each of apache-storm-xxx.zip, apache-storm-xxx.tar.gz, and the Apache Storm distribution created in step 2;
+4. Launch the WordCountTopology and ThroughputVsLatency topologies and check logs, UI metrics, etc;
+5. Test basic UI functionalities such as jstack, heap dump, deactivate, activate, rebalance, change log level, log search, kill topology;
+6. Test basic CLI commands such as kill, list, deactivate, activate, rebalance, etc.
+
+It is also preferable to set up a standalone secure Apache Storm cluster and test basic functionalities on it.
+
+Don't feel the pressure to do everything listed above. After you finish your review, reply to the corresponding email thread with your vote, summarizing the work you have performed and elaborating on any issues
+you have found.
+
+Your contribution is very much appreciated.
diff --git a/SECURITY.md b/SECURITY.md
index 93036b2595b..1d1bb5225a7 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -1,74 +1,511 @@
# Running Apache Storm Securely

-The current release of Apache Storm offers no authentication or authorization.
-It does not encrypt any data being sent across the network, and does not
-attempt to restrict access to data stored on the local file system or in
-Apache Zookeeper. As such there are a number of different precautions you may
-want to enact outside of storm itself to be sure storm is running securely.
+Apache Storm offers a range of configuration options when trying to secure
+your cluster. By default all authentication and authorization is disabled but
+can be turned on as needed. Many of these features only became available in
+Storm-0.10.

-The exact detail of how to setup these precautions varies a lot and is beyond
-the scope of this document.
+## Firewall/OS level Security
+
+You can still have a secure storm cluster without turning on formal
+Authentication and Authorization. But doing so usually requires
+configuring your Operating System to restrict the operations that can be done.
+This is generally a good idea even if you plan on running your cluster with Auth.

-## Network Security
+The exact details of how to set up these precautions vary a lot and are beyond
+the scope of this document.
It is generally a good idea to enable a firewall and restrict incoming network connections to only those originating from the cluster itself and from trusted
-hosts and services, a complete list of ports storm uses are below.
+hosts and services. Towards this end, a complete list of the ports storm uses is below.

-If the data your cluster is processing is sensitive it might be best to setup
+If the data your cluster is processing is sensitive it might be best to set up
IPsec to encrypt all traffic being sent between the hosts in the cluster.

### Ports

| Default Port | Storm Config | Client Hosts/Processes | Server |
|--------------|--------------|------------------------|--------|
-| 2181 | `storm.zookeeper.port` | Nimbus, Supervisors, and Worker processes | Zookeeper |
+| 2181 | `storm.zookeeper.port` | Nimbus, Supervisors, and Worker processes | ZooKeeper |
| 6627 | `nimbus.thrift.port` | Storm clients, Supervisors, and UI | Nimbus |
+| 6628 | `supervisor.thrift.port` | Nimbus | Supervisors |
| 8080 | `ui.port` | Client Web Browsers | UI |
| 8000 | `logviewer.port` | Client Web Browsers | Logviewer |
| 3772 | `drpc.port` | External DRPC Clients | DRPC |
| 3773 | `drpc.invocations.port` | Worker Processes | DRPC |
+| 3774 | `drpc.http.port` | External HTTP DRPC Clients | DRPC |
| 670{0,1,2,3} | `supervisor.slots.ports` | Worker Processes | Worker Processes |

+Note that the Worker Processes ports above are just the default ones; the actual
+ports for your setup may vary.
+
+
### UI/Logviewer

The UI and logviewer processes provide a way to not only see what a cluster is
doing, but also manipulate running topologies. In general these processes should
-not be exposed except to users of the cluster. It is often simplest to restrict
-these ports to only accept connections from local hosts, and then front them with another web server,
-like Apache httpd, that can authenticate/authorize incoming connections and
-proxy the connection to the storm process. To make this work the ui process must have
-logviewer.port set to the port of the proxy in its storm.yaml, while the logviewers
-must have it set to the actual port that they are going to bind to.
+not be exposed except to users of the cluster.
+
+Some form of Authentication is typically required; e.g., by using java servlet filters
+
+```yaml
+ui.filter: "filter.class"
+ui.filter.params: "param1":"value1"
+logviewer.filter: "filter.class"
+logviewer.filter.params: "param1":"value1"
+```
+or by restricting the UI/log-viewers ports to only accept connections from localhost,
+and then front them with another web server, like Apache httpd, that can
+authenticate/authorize incoming connections and proxy the connection to the storm process.
+To make this work the ui process must have logviewer.port set to the port of the proxy
+in its `storm.yaml`, while the logviewers must have it set to the actual port that they
+are going to bind to.
+
+The servlet filters are preferred because they allow individual topologies to
+specify who is (and who is not) allowed to access the pages associated with
+each topology.
+
+The Storm UI (or logviewer) can be configured to use `AuthenticationFilter` from hadoop-auth.
+```yaml
+ui.filter: "org.apache.hadoop.security.authentication.server.AuthenticationFilter"
+ui.filter.params:
+   "type": "kerberos"
+   "kerberos.principal": "HTTP/nimbus.witzend.com"
+   "kerberos.keytab": "/vagrant/keytabs/http.keytab"
+   "kerberos.name.rules": "RULE:[2:$1@$0]([jt]t@.*EXAMPLE.COM)s/.*/$MAPRED_USER/ RULE:[2:$1@$0]([nd]n@.*EXAMPLE.COM)s/.*/$HDFS_USER/DEFAULT"
+```
+Make sure to create a principal `HTTP/{hostname}` (here hostname should be the host where the UI daemon runs).
+
+Once configured, you must do `kinit` before accessing the UI.
+
+Here's an example of accessing Storm's API after the setup above:
+```bash
+curl -i --negotiate -u:anyUser -b ~/cookiejar.txt -c ~/cookiejar.txt http://storm-ui-hostname:8080/api/v1/cluster/summary
+```
+
+Browsers must also be configured to support Kerberos negotiation:
+
+1. Firefox: Go to `about:config` and search for `network.negotiate-auth.trusted-uris`, then double-click to add the value "http://storm-ui-hostname:8080"
+2. Google-chrome: start from command line with: `google-chrome --auth-server-whitelist="*storm-ui-hostname" --auth-negotiate-delegate-whitelist="*storm-ui-hostname"`
+3. IE: Configure trusted websites to include "storm-ui-hostname" and allow negotiation for that website
+
+**Caution**: In AD MIT Kerberos setup, the key size is bigger than the default UI jetty server request header size. So make sure you set `ui.header.buffer.bytes` to 65536 in `storm.yaml`. More details are in [STORM-633](https://issues.apache.org/jira/browse/STORM-633)
+
-
-### Nimbus
+## UI / DRPC SSL
+
+Both UI and DRPC allow users to configure SSL.
+
+### UI
+
+For UI, set the following config in `storm.yaml`. Generating keystores with proper keys and certs should be taken care of by the user before this step.
+
+1. `ui.https.port`
+2. `ui.https.keystore.type` (example "jks")
+3. `ui.https.keystore.path` (example "/etc/ssl/storm_keystore.jks")
+4. `ui.https.keystore.password` (keystore password)
+5. `ui.https.key.password` (private key password)
+
+Optional config:
+
+1. `ui.https.truststore.path` (example "/etc/ssl/storm_truststore.jks")
+2. `ui.https.truststore.password` (truststore password)
+3. `ui.https.truststore.type` (example "jks")
+
+To set up 2-way authentication:
+
+1. `ui.https.want.client.auth` (If this is set to true, the server requests client certificate authentication, but keeps the connection open even if no certificate is provided)
+2. `ui.https.need.client.auth` (If this is set to true, the server requires the client to provide authentication)

-Nimbus's Thrift port should be locked down as it can be used to control the entire
-cluster including running arbitrary user code on different nodes in the cluster.
-Ideally access to it is restricted to nodes within the cluster and possibly some gateway
-nodes that allow authorized users to log into them and run storm client commands.

### DRPC
+Similarly to the UI configuration above, set the following config to configure SSL for DRPC:
+
+1. `drpc.https.port`
+2. `drpc.https.keystore.type` (example "jks")
+3. `drpc.https.keystore.path` (example "/etc/ssl/storm_keystore.jks")
+4. `drpc.https.keystore.password` (keystore password)
+5. `drpc.https.key.password` (private key password)
+
+Optional config:
+
+1. `drpc.https.truststore.path` (example "/etc/ssl/storm_truststore.jks")
+2. `drpc.https.truststore.password` (truststore password)
+3. `drpc.https.truststore.type` (example "jks")
+
+To set up 2-way authentication:
+
+1. `drpc.https.want.client.auth` (If this is set to true, the server requests client certificate authentication, but keeps the connection open even if no certificate is provided)
+2. `drpc.https.need.client.auth` (If this is set to true, the server requires the client to provide authentication)
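+
+Putting the pieces together, a minimal HTTPS `storm.yaml` sketch combining the settings above might look like the following; the port numbers, paths, and passwords here are illustrative placeholders, not recommended values:
+
+```yaml
+# Illustrative UI HTTPS settings (placeholder values)
+ui.https.port: 8443
+ui.https.keystore.type: "jks"
+ui.https.keystore.path: "/etc/ssl/storm_keystore.jks"
+ui.https.keystore.password: "keystore-password"
+ui.https.key.password: "key-password"
+
+# Illustrative DRPC HTTPS settings (placeholder values)
+drpc.https.port: 3775
+drpc.https.keystore.type: "jks"
+drpc.https.keystore.path: "/etc/ssl/storm_keystore.jks"
+drpc.https.keystore.password: "keystore-password"
+drpc.https.key.password: "key-password"
+```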
+
+#### GENERATE CERTIFICATES FOR LOCAL TESTING SSL SETUP
+
+Run the following script and fill in the values and passwords when prompted. The `keyalg` must be set to `RSA`.
+
+```bash
+#!/bin/bash
+
+DIR=/Users/user/certs/dir/
+
+keytool -keystore $DIR/server.keystore.jks -alias localhost -validity 365 -keyalg RSA -genkey
+
+openssl req -new -x509 -keyout $DIR/ca-key -out $DIR/ca-cert -days 365
+
+keytool -keystore $DIR/server.truststore.jks -alias CARoot -import -file $DIR/ca-cert
+
+keytool -keystore $DIR/client.truststore.jks -alias CARoot -import -file $DIR/ca-cert
+
+keytool -keystore $DIR/server.keystore.jks -alias localhost -certreq -file $DIR/cert-file
+
+openssl x509 -req -CA $DIR/ca-cert -CAkey $DIR/ca-key -in $DIR/cert-file -out $DIR/cert-signed -days 365 -CAcreateserial -passin pass:test12
+
+keytool -keystore $DIR/server.keystore.jks -alias CARoot -import -file $DIR/ca-cert
+
+keytool -keystore $DIR/server.keystore.jks -alias localhost -import -file $DIR/cert-signed
+```
+
+## Authentication (Kerberos)
+
+Storm offers pluggable authentication support through thrift and SASL. This
+example only covers Kerberos as it is a common setup for most big data
+projects.
+
+Setting up a KDC and configuring kerberos on each node is beyond the scope of
+this document and it is assumed that you have done that already.
+
+### Create Headless Principals and keytabs
+
+Each ZooKeeper Server, Nimbus, and DRPC server will need a service principal, which, by convention, includes the FQDN of the host it will run on. Be aware that the ZooKeeper user *MUST* be `zookeeper`.
+The supervisors and UI also need a principal to run as, but because they are outgoing connections they do not need to be service principals.
+The following is an example of how to set up kerberos principals, but the details may vary depending on your KDC and OS.
+
+
+```bash
+# ZooKeeper (Will need one of these for each box in the ZK ensemble)
+sudo kadmin.local -q 'addprinc zookeeper/zk1.example.com@STORM.EXAMPLE.COM'
+sudo kadmin.local -q "ktadd -k /tmp/zk.keytab zookeeper/zk1.example.com@STORM.EXAMPLE.COM"
+# Nimbus and DRPC
+sudo kadmin.local -q 'addprinc storm/storm.example.com@STORM.EXAMPLE.COM'
+sudo kadmin.local -q "ktadd -k /tmp/storm.keytab storm/storm.example.com@STORM.EXAMPLE.COM"
+# All UI logviewer and Supervisors
+sudo kadmin.local -q 'addprinc storm@STORM.EXAMPLE.COM'
+sudo kadmin.local -q "ktadd -k /tmp/storm.keytab storm@STORM.EXAMPLE.COM"
+```
+
+Be sure to distribute the keytab(s) to the appropriate boxes and set the FS permissions so that only the headless user running ZK, or storm, has access to them.
+
+#### Storm Kerberos Configuration
+
+Both storm and ZooKeeper use jaas configuration files to log the user in.
+Each jaas file may have multiple sections for different interfaces being used.
+
+To enable Kerberos authentication in storm you need to set the following `storm.yaml` configs:
+```yaml
+storm.thrift.transport: "org.apache.storm.security.auth.kerberos.KerberosSaslTransportPlugin"
+java.security.auth.login.config: "/path/to/jaas.conf"
+```
+
+Nimbus and the supervisor processes will also connect to ZooKeeper (ZK) and we want to configure them to use Kerberos for authentication with ZK.
+To do this append
+```
+-Djava.security.auth.login.config=/path/to/jaas.conf
+```
+
+to the childopts of nimbus, ui, and supervisor. Here is an example given the default childopts settings at the time of this doc's writing:
+
+```yaml
+nimbus.childopts: "-Xmx1024m -Djava.security.auth.login.config=/path/to/jaas.conf"
+ui.childopts: "-Xmx768m -Djava.security.auth.login.config=/path/to/jaas.conf"
+supervisor.childopts: "-Xmx256m -Djava.security.auth.login.config=/path/to/jaas.conf"
+```
+
+The jaas.conf file should look something like the following for the storm nodes.
+The StormServer section is used by nimbus and the DRPC nodes. It does not need to be included on supervisor nodes.
+The StormClient section is used by all storm clients that want to talk to nimbus, including the ui, logviewer, and supervisor. We will use this section on the gateways as well, but the structure of that will be a bit different.
+The Client section is used by processes wanting to talk to ZooKeeper and really only needs to be included with nimbus and the supervisors.
+The Server section is used by the ZooKeeper servers.
+Having unused sections in the jaas is not a problem.
+
+```
+StormServer {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="$keytab"
+   storeKey=true
+   useTicketCache=false
+   principal="$principal";
+};
+StormClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="$keytab"
+   storeKey=true
+   useTicketCache=false
+   serviceName="$nimbus_user"
+   principal="$principal";
+};
+Client {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="$keytab"
+   storeKey=true
+   useTicketCache=false
+   serviceName="zookeeper"
+   principal="$principal";
+};
+Server {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="$keytab"
+   storeKey=true
+   useTicketCache=false
+   principal="$principal";
+};
+```
+
+The following is an example based off of the keytabs generated above:
+```
+StormServer {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="/keytabs/storm.keytab"
+   storeKey=true
+   useTicketCache=false
+   principal="storm/storm.example.com@STORM.EXAMPLE.COM";
+};
+StormClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="/keytabs/storm.keytab"
+   storeKey=true
+   useTicketCache=false
+   serviceName="storm"
+   principal="storm@STORM.EXAMPLE.COM";
+};
+Client {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="/keytabs/storm.keytab"
+   storeKey=true
+   useTicketCache=false
+   serviceName="zookeeper"
+   principal="storm@STORM.EXAMPLE.COM";
+};
+Server {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="/keytabs/zk.keytab"
+   storeKey=true
+   useTicketCache=false
+   serviceName="zookeeper"
+   principal="zookeeper/zk1.example.com@STORM.EXAMPLE.COM";
+};
+```
-Each DRPC server has two different ports. The invocations port is accessed by worker
-processes within the cluster. The other port is accessed by external clients that
-want to query the topology. The external port should be restricted to hosts that you
-want to be able to do queries.
+Nimbus will also translate the principal into a local user name, so that other services can use this name. To configure this for Kerberos authentication set:

-### Supervisors
-
-Supervisors are only clients they are not servers, and as such don't need special restrictions.
+
+```
+storm.principal.tolocal: "org.apache.storm.security.auth.KerberosPrincipalToLocal"
+```
+
+This only needs to be done on nimbus, but it will not hurt on any node.
+We also need to inform the topology who the supervisor daemon and the nimbus daemon are running as, from a ZooKeeper perspective.
+
+```
+storm.zookeeper.superACL: "sasl:${nimbus-user}"
+```
+
+Here *nimbus-user* is the Kerberos user that nimbus uses to authenticate with ZooKeeper. If ZooKeeper is stripping host and realm then this needs to have host and realm stripped too.
+
+#### ZooKeeper Ensemble
+
+Complete details of how to set up a secure ZK are beyond the scope of this document. But in general you want to enable SASL authentication on each server, and optionally strip off the host and realm:
+
+```ini
+authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider
+kerberos.removeHostFromPrincipal = true
+kerberos.removeRealmFromPrincipal = true
+```
+
+And you want to include the jaas.conf on the command line when launching the server so it can find the keytab:
+```bash
+-Djava.security.auth.login.config=/jaas/zk_jaas.conf
+```
+
+#### Gateways
+
+Ideally the end user will only need to run `kinit` before interacting with storm. To make this happen seamlessly we need the default jaas.conf on the gateways to be something like:
+
+```
+StormClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   doNotPrompt=false
+   useTicketCache=true
+   serviceName="$nimbus_user";
+};
+```
+
+The end user can override this if they have a headless user that has a keytab.
+
+### Authorization Setup
+
+*Authentication* does the job of verifying who the user is, but we also need *authorization* to do the job of enforcing what each user can do.
+
+The preferred authorization plug-in for nimbus is the *SimpleACLAuthorizer*. To use the *SimpleACLAuthorizer*, set the following:
+
+```yaml
+nimbus.authorizer: "org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer"
+```
+
+DRPC has its own separate authorizer configuration. Do not use SimpleACLAuthorizer for DRPC.
+
+The *SimpleACLAuthorizer* plug-in needs to know who the supervisor users are, and it needs to know about all of the administrator users, including the user running the ui daemon.
+
+These are set through *nimbus.supervisor.users* and *nimbus.admins* respectively. Each can either be a full Kerberos principal name, or the name of the user with host and realm stripped off.
+
+The Log servers have their own authorization configurations. These are set through *logs.users* and *logs.groups*. These should be set to the admin users or groups for all of the nodes in the cluster.
+
+When a topology is submitted, the submitting user can specify users in this list as well. The users and groups specified (in addition to the users in the cluster-wide setting) will be granted access to the submitted topology's worker logs in the logviewers.
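+
+As a reference, a minimal authorization sketch in `storm.yaml` might look like the following; the user and group names are illustrative placeholders, not values the documentation prescribes:
+
+```yaml
+nimbus.authorizer: "org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer"
+# Headless user(s) the supervisors run as (placeholder name)
+nimbus.supervisor.users:
+  - "storm_supervisor"
+# Administrator users, including the user running the ui daemon (placeholder name)
+nimbus.admins:
+  - "storm_admin"
+# Admin users/groups granted access to all worker logs (placeholder names)
+logs.users:
+  - "storm_admin"
+logs.groups:
+  - "storm_admins"
+```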
+
+### Supervisors headless User and group Setup
+
+To ensure isolation of users in multi-tenancy, the supervisors must run under a headless user and a unique group:
+
+1. Add your chosen "headless user" to all supervisor hosts.
+2. Create a unique group and make it the primary group for the headless user on the supervisor nodes.
+3. Then set the supervisor-related properties described in the sections below (e.g. `supervisor.run.worker.as.user` and the worker-launcher settings) on these supervisor nodes.
+
+### Multi-tenant Scheduler
+
+To support multi-tenancy better we have written a new scheduler. To enable this scheduler set:
+```yaml
+storm.scheduler: "org.apache.storm.scheduler.multitenant.MultitenantScheduler"
+```
+Be aware that many of the features of this scheduler rely on storm authentication. Without storm authentication, the scheduler will not know who the user is, and thus will not isolate topologies properly.
+
+The goal of the multi-tenant scheduler is to provide a way to isolate topologies from one another, but it also allows you to limit the total resources that an individual user can have in the cluster.
+
+The scheduler config can be set either through `storm.yaml` or through a separate config file called `multitenant-scheduler.yaml` (which should be placed in the same directory as `storm.yaml`). Though it *is* preferable to use `multitenant-scheduler.yaml`, because it can be updated without needing to restart nimbus.
+
+There is currently only one config option:
+
+* `multitenant.scheduler.user.pools`: a map from the user name to the maximum number of nodes that the user is guaranteed to be able to use for their topologies.
+
+For example:
+
+```yaml
+multitenant.scheduler.user.pools:
+    "evans": 10
+    "derek": 10
+```
+
+### Run worker processes as user who submitted the topology
+By default storm runs workers as the user that is running the supervisor. This is not ideal for security. To make storm run the topologies as the user that launched them, set:
+
+```yaml
+supervisor.run.worker.as.user: true
+```
+
+There are several files that go along with this that need to be configured properly to make storm secure.
+
+The `worker-launcher` executable is a special program that allows the supervisor to launch workers as different users. For this to work, `worker-launcher` needs to be owned by root, but with the group set to be a group that only the supervisor headless user is a part of. `worker-launcher` also needs to have `6550` octal permissions. There is also a `worker-launcher.cfg` file, usually located under `/etc/storm`, that should look something like the following:
+
+```ini
+storm.worker-launcher.group=$(worker_launcher_group)
+min.user.id=$(min_user_id)
+```
+where `worker_launcher_group` is the same group the supervisor user is a part of, and `min.user.id` is set to the first real user id on the system.
+This config file also needs to be owned by root and must *not* have group or world write permissions.
+
+
+### Storm-Netty Authentication
+
+Authentication for Netty connections between workers is disabled by default.
+It can either be set for your cluster or on a per-topology basis. This setting will prevent any
+unauthorized messages from getting processed. The config for enabling the
+Storm-Netty authentication is as follows:
+```yaml
+storm.messaging.netty.authentication: true
+```
+
+### Impersonating a user
+A storm client may submit requests on behalf of another user.
+For example, if `userX` submits an oozie workflow, and as part of the workflow execution user `oozie` wants to submit a topology on behalf of `userX`,
+it can do so by leveraging the impersonation feature. In order to submit a topology as some other user, you can use the `StormSubmitter.submitTopologyAs` API. Alternatively you can use `NimbusClient.getConfiguredClientAs`
+to get a nimbus client as some other user and perform any nimbus action (i.e., kill/rebalance/activate/deactivate) using this client.
+
+To ensure only authorized users can perform impersonation, you should start nimbus with `nimbus.impersonation.authorizer` set to `org.apache.storm.security.auth.authorizer.ImpersonationAuthorizer`.
+The `ImpersonationAuthorizer` uses `nimbus.impersonation.acl` as the acl to authorize users. The following is a sample nimbus config for supporting impersonation:
+
+```yaml
+nimbus.impersonation.authorizer: org.apache.storm.security.auth.authorizer.ImpersonationAuthorizer
+nimbus.impersonation.acl:
+    impersonating_user1:
+        hosts:
+            [comma separated list of hosts from which impersonating_user1 is allowed to impersonate other users]
+        groups:
+            [comma separated list of groups whose users impersonating_user1 is allowed to impersonate]
+    impersonating_user2:
+        hosts:
+            [comma separated list of hosts from which impersonating_user2 is allowed to impersonate other users]
+        groups:
+            [comma separated list of groups whose users impersonating_user2 is allowed to impersonate]
+```
+
+To support the oozie use-case, the following config can be supplied:
+```yaml
+nimbus.impersonation.acl:
+    oozie:
+        hosts:
+            [oozie-host1, oozie-host2, 127.0.0.1]
+        groups:
+            [some-group-that-userX-is-part-of]
+```
+
+### Automatic Credentials Push and Renewal
+Individual topologies have the ability to push credentials (tickets and tokens) to workers so that they can access secure services. Exposing this to all of the users can be a pain for them.
+To hide this from them, in the common case plugins can be used to populate the credentials, unpack them on the other side into a java Subject, and also allow Nimbus to renew the credentials if needed.
+These are controlled by the following configs (a combined sketch is shown after the list):
+
+* `topology.auto-credentials`: a list of java plugins, all of which must implement the IAutoCredentials interface, that populate the credentials on the gateway and unpack them on the worker side. On a kerberos secure cluster they should be set by default to point to `org.apache.storm.security.auth.kerberos.AutoTGT`. `nimbus.credential.renewers.classes` should also be set to this value so that nimbus can periodically renew the TGT on behalf of the user.
+* `nimbus.credential.renewers.freq.secs`: controls how often the renewer will poll to see if anything needs to be renewed, but the default should be fine.
+
+In addition Nimbus itself can be used to get credentials on behalf of the user submitting topologies. This can be configured using:
+* `nimbus.autocredential.plugins.classes`: a list of fully qualified class names, all of which must implement `INimbusCredentialPlugin`. Nimbus will invoke the populateCredentials method of all the configured implementations as part of topology
+submission. You should use this config with `topology.auto-credentials` and `nimbus.credential.renewers.classes` so the credentials can be populated on the worker side and nimbus can automatically renew them. Currently there are 2 examples of using this config: AutoHDFS and AutoHBase, which auto-populate hdfs and hbase delegation tokens for the topology submitter so they don't have to distribute keytabs on all possible worker hosts.
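+
+As a reference, a minimal sketch of these settings in `storm.yaml` for a kerberos secure cluster might look like the following; the renewal frequency shown is an illustrative placeholder, not a recommended value:
+
+```yaml
+topology.auto-credentials:
+  - "org.apache.storm.security.auth.kerberos.AutoTGT"
+nimbus.credential.renewers.classes:
+  - "org.apache.storm.security.auth.kerberos.AutoTGT"
+# How often (in seconds) nimbus polls for credentials that need renewal.
+# The default should be fine; this value is illustrative only.
+nimbus.credential.renewers.freq.secs: 600
+```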
+
+In addition, Nimbus itself can be used to get credentials on behalf of the user submitting topologies. This can be configured using:
+* `nimbus.autocredential.plugins.classes`: a list of fully qualified class names, all of which must implement `INimbusCredentialPlugin`. Nimbus will invoke the populateCredentials method of all the configured implementations as part of topology submission. You should use this config with `topology.auto-credentials` and `nimbus.credential.renewers.classes` so the credentials can be populated on the worker side and nimbus can automatically renew them. Currently there are two examples of using this config: AutoHDFS and AutoHBase, which auto-populate hdfs and hbase delegation tokens for the topology submitter so that keytabs do not have to be distributed to all possible worker hosts.
+
+### Limits
+By default storm allows a topology of any size to be submitted. But ZooKeeper and other components place limits on how big a topology can actually be. The following configs allow you to limit a topology's maximum size.
+
+| YAML Setting | Description |
+|------------|----------------------|
+| `nimbus.slots.perTopology` | The maximum number of slots/workers any topology can use. |
+| `nimbus.executors.perTopology` | The maximum number of executors/threads any topology can use. |
+
+### Log Cleanup
+The Logviewer daemon is now also responsible for cleaning up old log files for dead topologies.
+
+| YAML Setting | Description |
+|--------------|-------------------------------------|
+| `logviewer.cleanup.age.mins` | How old (by last modification time) a worker's log must be before it is considered ready for clean-up. (Logs of living workers are never cleaned up by the logviewer; they are rolled by a standard logging service, e.g. log4j2 in 0.11.) |
+| `logviewer.cleanup.interval.secs` | How often, in seconds, the logviewer cleans up worker logs. |
+
+
+### Allowing specific users or groups to access storm
+
+With SimpleACLAuthorizer, any user with a valid kerberos ticket can deploy a topology or perform further operations such as activate, deactivate, and access cluster information.
+You can restrict this access by specifying `nimbus.users` or `nimbus.groups` in `storm.yaml`. If `nimbus.users` is configured, then only the users in the list can deploy a topology or access the cluster.
+Similarly, `nimbus.groups` restricts storm cluster access to users who belong to those groups.
+
+E.g.:
+
+```yaml
+nimbus.users:
+   - "testuser"
+```
+
+or
+
+```yaml
+nimbus.groups:
+   - "storm"
+```
+
+### DRPC
+Hopefully more on this soon.
diff --git a/THIRD-PARTY.properties b/THIRD-PARTY.properties
new file mode 100644
index 00000000000..f0c2251b4c9
--- /dev/null
+++ b/THIRD-PARTY.properties
@@ -0,0 +1,35 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +com.twitter--carbonite--1.5.0=Apache License version 2.0 +commons-beanutils--commons-beanutils--1.7.0=Apache License version 2.0 +commons-logging--commons-logging--1.0.3=Apache License version 2.0 +org.apache.zookeeper--zookeeper--3.4.6=Apache License version 2.0 +org.codehaus.jettison--jettison--1.1=Apache License version 2.0 +oro--oro--2.0.8=Apache License version 2.0 + +asm--asm--3.1=BSD 3-Clause License +asm--asm-commons--3.1=BSD 3-Clause License +asm--asm-tree--3.1=BSD 3-Clause License + +javax.jms--jms--1.1=COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 +javax.servlet--jsp-api--2.0=COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 +javax.servlet--servlet-api--2.5=COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 +javax.servlet.jsp--jsp-api--2.1=COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 +javax.transaction--jta--1.1=COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 +javax.transaction--transaction-api--1.1=COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 +org.glassfish.jersey--jersey-bom--2.27=COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 \ No newline at end of file diff --git a/TODO b/TODO deleted file mode 100644 index 23c88376666..00000000000 --- a/TODO +++ /dev/null @@ -1,178 +0,0 @@ -Use cases: - -1. number of steps between 2 people in a graph (topology with cycles?) - - -################# - -* Repackage jzmq and zmq as a leiningen "native dep" - - this might be good, since the native dep can package builds for all different systems/os's? - - -* Deploy design: - -- storm swap {name} {jar} {class} -- it's allowed to use resources equal to current running topology plus number of free resources -- starts in deactivated mode -- add TOPOLOGY_STARTUP_TIME config for the delay until nimbus activates a topology after launching it -- for swap, after the startup time, deactivate the other topology, wait the TOPOLOGY_MESSAGE_TIMEOUT_SECS, and then activate the other topology -- should be able to decrease the message timeout for killing or swapping (add optional thrift parameter) -- or just make it part of the config? -- add killWithOptions, swap, swapWithOptions - -* Storm UI, stats, debugging, diagnosis tools --- need to be able to hide system streams/components from the calculations (another query param and should be default) --- need to optimize (slowness is probably on nimbus end of querying zk, consider adding heartbeat caching into nimbus) --- add margins --- add titles so its easier to distinguish the various pages --- right align all table columns except for the leftmost - -* Unit test the core pieces that have stabilized their APIs - -- process simulator -- virtual ports -- supervisor -- utils -- test worker/tasks - -* implement pseudo-distributed mode -- this is for testing the distributed parts of the code - - perhaps i can use pallet/vmfest for this - -* Need integration tests that run on an actual storm cluster (scp code/process code/zookeeper code not tested in unit tests) - -* bolts with none grouping can be pushed into a bolt. e.g. A -> B -> C - A -> D -> E - -If A -> B and A -> D are shuffle grouping = none, and B -> C and D -> E are not, then both can be run in A, b's branch goes to C and D's branch goes to E - - -* Failure design - -Add fail method to outputcollector -Fail sends fail message to Acker for those anchors, which sends fail message back to spout. -Whenever spout fails a tuple, it emits it in its failure stream... - -Add fail method to drpc... 
Causes blocked thread to throw exception - -* Have worker heartbeat with its task ids, nimbus verifies - if wrong, reassign tasks? -- detect and ignore stray tasks -Each worker can choose a unique id for itself when heart beating -- nimbus deletes those that aren't in topology - -* Subscriptions design - --- new kind of spout: "subscription spout" - --> goal is to sync it's data across the tasks that subscribe to its streams - --> after doing a grouping, remembers what task it sent the tuple to (regardless of grouping). if a task dies, it knows its subscriptions and asks to be resynced - --> normal operation is to push to tasks, but pull done when a task starts up (b/c previous task died or something) - --> need to be able to add tuples to subscription or take tuples away (this is protocol with who you're subscribing to - e.g. rocket) - --> subscriptions can only happen in a spout because it requires persistent state - --> when subscription spout task dies, it polls the source (e.g. rocket) for all the subscription info - --> ideally you'd set things up to have one subscription spout per rocket server - --> TODO: Need some way to delete subscriptions -> part of tuple or extra metadata on tuple (extra metadata seems cleaner) - --> add isSubscription() method to Tuple as well as a getSubscriptionType() [which returns ADD or REMOVE] - --> when a spout starts up, it also needs to push all of its subscription info - --> acks are irrelevant for subscription tuples -- how should acks be managed as an abstraction? - -- maybe the synchronized state is done for you -- you just access the state directly and receive a callback whenever it changes? - -- so don't use tuples... - --> subscriptions break all the abstractions, perhaps I should generalize spouts and factor acking as a library on top of storm. subscriptions would just be another kind of library? -> no, it seems to break abstractions anyway (like keeping task -> tuples in memory) - --> maybe call it "syncspout" - --> if just do syncing (don't expose tuples directly?) - --> have a "SubscribedState" class that takes care of indexing/etc. --> expose it through topologycontext? - -- need a way to distinguish between states of different streams - -- has "add" and "remove" methods - -- bolt can give a statemanager object that implements add and remove in the prepare method - -- add(Tuple tuple) - -- remove(Tuple tuple) - --> synchronize protocol (when spout or source of data dies): - --> send how many tuples are going to be sent - --> send the tuples - --> OR: pack everything together into a single message (could be hard b/c where tuples are supposed to go is abstracted away) - --> tie everything together with a unique ID - --> once task receives everything, has info needed to remove tuples - --> statespout should do long-polling with timeout - --> to do subscriptions, the state should contain something like [url, subscriber]. some bolt appends subscriber to tuples, group by subscriber, and send info back - --> how to to fields grouping with an even distribution? - --> ********* tasks need to block on startup until they're synchronized ********* - --> send sync messages in a loop until it's synchronized - --> add a task.synchronize.poll.freq.secs config (default to 10 seconds) - --> need to buffer other messages as topology is waiting for synchronization messages (use disk?) - --> could use acking system to know if a piece of state gets fully synchronized and communicate this with user - --> perhaps expose this through a special stream? 
(the state status stream -> similar to failure streams) - --> should be able to do updates of existing state - --> use case: have a knob that you can set externally - --> this isn't really any better than just using zookeeper directly - - -_myState = context.setSubscribedState(_myState) - -StateSpout { - //does a timeout long poll and emits new add or remove state tuples (add and remove on the output collector) - nextTuple(StateSpoutOutputCollector) //collector has add and remove methods add(id, tuple). remove(id) - //emits all the tuples into the output collector (in the background, will also send ids and counts to tasks so they know how to synchronize) - //called on startup - //collector can have a synchronize method in case the source of data (e.g., rocket) craps out - synchronize(SynchronizationOutputCollector) //collector only has add(id, tuple) method -} - -//task startup (in prepare method) [this is automatic] -for(int taskId: statespoutids) { - emitDirect(SYNC_STREAM, tuple()) -} - -statespout synchronization(): - id = uuid() - //getAlLStateTuples calls synchronize on the spout to get the tuples - for(Tuple t: getAllStateTuplesFromSource()) { - List tasks = emit(cons(id, t)); - .. keep track of id -> tasks -> count - for(task: all output tasks) { - emitDirect(task, id, count) - } - } - -for synchronization to work, task needs to keep track of which tasks sent it tuples, and compare against only that set on synchronization - -Need a way to propogate information back up the topology - "subscriptions" -e.g. browser -> rocket -> bolt -> bolt -> bolt. - -example: #retweets for a subscribed set of tweet ids - -storm topology - - -> tweet spout (A) -> group on original id -> count (B) -> rocket - -subscriptions: rocket -> count (B) tweet id (need to group) -> spout (need to go to all) - --- how does it work when stuff dies downstream or upstream? do people ask what the subscriptions are? or do you push your subscriptions up? a combination? - --- maybe subscriptions are a "constant" spout? e..g, continuously emits and refreshes to make sure every task has the tuple. this seem amporphous and hard to implement... nimbus would need to refire all constant spouts whenever there's a reassignment that affects the flow of data. subscriptions seem more natural - --- subscriptions are a special kind of stream that are driven by being asked to send it. e..g, rocket is a spout that emits subscription/unsubscription tuples. they only send it when they get something new, or are asked as to what all the subscriptions are - --- maybe you just need a system stream to know when tasks are created. when you see that a downstream task is created, you know to fire subscriptions to it if its subscribed to your subscriptions stream? - how does this interplay with all the grouping types... you almost want to do a grouping and only send what to tasks that would have received. spouts would need to be able to subscribe to streams as well - -(use 'backtype.storm.testing) -;;(start-simulating-time!) -(def cluster (mk-local-storm-cluster)) -(use 'backtype.storm.bootstrap) (bootstrap) -(import '[backtype.storm.testing TestWordCounter TestWordSpout TestGlobalCount TestAggregatesCounter]) -(def spout (feeder-spout ["word"])) -(def topology (thrift/mk-topology - {1 (thrift/mk-spout-spec spout :parallelism-hint 3)} - {2 (thrift/mk-bolt-spec {1 ["word"]} (TestWordCounter.) 
:parallelism-hint 4)
-                 3 (thrift/mk-bolt-spec {1 :global} (TestGlobalCount.))
-                 4 (thrift/mk-bolt-spec {2 :global} (TestAggregatesCounter.))
-                 }))
-(submit-local-topology (:nimbus cluster) "test" {TOPOLOGY-WORKERS 4 TOPOLOGY-DEBUG true} topology)
-
-
-* clean up project
-  - remove log4j dir and instead generate it in the deploy (it's only used in bin/storm -> create a console one and put into bin/)
-  - include system component / stream information in the topologycontext and clean up system specific code all over the place
-
-* Very rare errors
-
-weird nullptr exceptions:
-(tasks i) on send-fn
-no virtual port socket for outbound task (in worker)
-
diff --git a/aws-bin/README b/aws-bin/README
new file mode 100644
index 00000000000..7d2d633a29b
--- /dev/null
+++ b/aws-bin/README
@@ -0,0 +1,8 @@
+AWS Binaries
+
+These require Python version 3. AWS credentials must already be set under ${HOME}/.aws
+
+aws_kafka.py: A command-line processor for interacting with AWS Kafka (MSK). It contains a subset of the functionality
+              supported by boto3 for Kafka. Run the list_clusters command and then select_cluster, which sets the
+              current cluster. Some commands work on the currently selected cluster. Other commands accept a
+              cluster name (or apply the same command to all the clusters).
diff --git a/aws-bin/aws_kafka.py b/aws-bin/aws_kafka.py
new file mode 100755
index 00000000000..dff48d9f367
--- /dev/null
+++ b/aws-bin/aws_kafka.py
@@ -0,0 +1,234 @@
+#!/usr/bin/env python3
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
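+
+"""
+aws_kafka.py: an interactive command-line processor for AWS Kafka (MSK),
+built on boto3's 'kafka' client and Python's cmd.Cmd REPL.
+
+Typical session: run list_clusters to enumerate the MSK clusters, then
+select_cluster to set the current cluster. Most commands operate on a
+cluster name given as an argument, on the currently selected cluster,
+or on all known clusters.
+"""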
+
+import pprint
+import traceback
+from cmd import Cmd
+
+import boto3
+
+
+class AwsKafkaCmd(Cmd):
+    """
+    Refer to the AWS documentation on the Kafka client interface:
+    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kafka.html
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.client = boto3.client('kafka')
+        self.cluster_info_list = None
+        self.current_cluster_info = None
+
+    def onecmd(self, line):
+        """Override the command processor so it does not exit on an exception in the command"""
+        try:
+            return super().onecmd(line)
+        except Exception:
+            traceback.print_exc()
+            return False  # don't stop
+
+    def _print_pretty_dict(self, d, indent=4):
+        """Return a pretty-formatted string for a dict, list, or scalar value"""
+        if not d:
+            return ""
+        if isinstance(d, list):
+            return "\n" + "\t" * indent + "\n ".join([f'({i}) {self._print_pretty_dict(item)}' for i, item in enumerate(d)])
+        elif isinstance(d, dict):
+            return pprint.pformat(d, indent=indent)
+        else:
+            return str(d)
+
+    def _confirm(self, prompt="Confirm "):
+        confirm = input(prompt + "(yes/no)")
+        return confirm == "yes"
+
+    def get_cluster_names(self):
+        return [x['ClusterName'] for x in self.get_cluster_info_list()]
+
+    def get_cluster_info_list(self):
+        if not self.cluster_info_list:
+            print("Finding current list of clusters")
+            self.do_list(None)
+        return self.cluster_info_list
+
+    def set_current_cluster(self, cluster_name):
+        for cluster_info in self.get_cluster_info_list():
+            if cluster_name == cluster_info['ClusterName']:
+                self.current_cluster_info = cluster_info
+                return True
+        else:  # no exact match; try a partial match
+            print(f'No cluster-name matched "{cluster_name}", trying partial match')
+            for cluster_info in self.get_cluster_info_list():
+                if cluster_name in cluster_info['ClusterName']:
+                    print(f'Selecting cluster "{cluster_info["ClusterName"]}" as partial match to {cluster_name}')
+                    self.current_cluster_info = cluster_info
+                    return True
+        return False
+
+    def do_info(self, args):
+        """List session information such as the known clusters and the selected cluster"""
+        print(f'Clusters: {self.get_cluster_names()}')
+        if self.current_cluster_info:
+            print(f'Current cluster: {self.current_cluster_info["ClusterName"]}')
+        else:
+            print("Current cluster: None")
+
+    def do_list(self, args):
+        """Lists the clusters"""
+        clusters = self.client.list_clusters()
+        if clusters:
+            self.cluster_info_list = clusters['ClusterInfoList']
+            if self.cluster_info_list:
+                for i, cluster_info in enumerate(self.cluster_info_list):
+                    cluster_name = cluster_info["ClusterName"]
+                    cluster_arn = cluster_info["ClusterArn"]
+                    res = self._print_pretty_dict(cluster_info, indent=4)
+                    print(f'({i})\t{cluster_name}\t{cluster_arn}\n\n\t{res}\n')
+            else:
+                print('No Kafka clusters found')
+        else:
+            print('No response from AWS')
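+
+    # Most of the do_* commands below delegate to run_client_method_and_print_tag,
+    # which looks up the named method on the boto3 Kafka client, injects the
+    # ClusterArn of the matching cluster(s), invokes it, and pretty-prints the
+    # part of the response selected by result_tag.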
+    def run_client_method_and_print_tag(self, cluster_name, client_method_name, result_tag, result_label=None, keyword_args=None):
+        """
+        Run a client method for the supplied cluster name, the current cluster, or all clusters.
+        If cluster_name is supplied (in the args) then use it. Otherwise, if the current cluster is set,
+        return information on it. Otherwise, return the same information on all the clusters.
+
+        :param cluster_name: arguments to the command, typically the name of the cluster
+        :param client_method_name: method on the client to call
+        :param result_tag: tag in the response that we are interested in
+        :param result_label: descriptive label for the result (defaults to result_tag)
+        :param keyword_args: optional keyword arguments for the client method (ClusterArn is added here)
+        :return: None; results are printed
+        """
+        client_func = getattr(self.client, client_method_name)
+        keyword_args = dict(keyword_args) if keyword_args else {}
+        if not result_label:
+            result_label = result_tag
+        if cluster_name:
+            for cluster_info in self.get_cluster_info_list():
+                if cluster_name in cluster_info['ClusterName']:
+                    keyword_args['ClusterArn'] = cluster_info['ClusterArn']
+                    result = client_func(**keyword_args)
+                    info = self._print_pretty_dict(result[result_tag] if (result and result_tag) else result)
+                    print(f'Cluster {cluster_info["ClusterName"]} {result_label}={info}')
+        else:
+            if self.current_cluster_info:
+                keyword_args['ClusterArn'] = self.current_cluster_info['ClusterArn']
+                result = client_func(**keyword_args)
+                info = self._print_pretty_dict(result[result_tag] if (result and result_tag) else result)
+                print(f'Cluster {self.current_cluster_info["ClusterName"]} {result_label}={info}')
+            else:
+                for cluster_info in self.get_cluster_info_list():
+                    keyword_args['ClusterArn'] = cluster_info['ClusterArn']
+                    result = client_func(**keyword_args)
+                    info = self._print_pretty_dict(result[result_tag] if (result and result_tag) else result)
+                    print(f'Cluster {cluster_info["ClusterName"]} {result_label}={info}')
+
+    def do_bootstrap_brokers(self, args):
+        """List bootstrap brokers for the supplied cluster_name, the current cluster, or all clusters"""
+        self.run_client_method_and_print_tag(args, 'get_bootstrap_brokers', 'BootstrapBrokerStringSaslIam',
+                                             result_label='brokers')
+
+    def do_describe(self, args):
+        """Describe the supplied cluster_name, the current cluster, or all clusters"""
+        self.run_client_method_and_print_tag(args, 'describe_cluster', 'ClusterInfo',
+                                             result_label='info')
+
+    def do_describev2(self, args):
+        """Describe the supplied cluster_name, the current cluster, or all clusters"""
+        self.run_client_method_and_print_tag(args, 'describe_cluster_v2', 'ClusterInfo',
+                                             result_label='info')
+
+    def do_compatible_kafka_versions(self, args):
+        """Gets the Apache Kafka versions to which you can update the MSK cluster"""
+        self.run_client_method_and_print_tag(args, 'get_compatible_kafka_versions', 'CompatibleKafkaVersions')
+
+    def do_list_cluster_operations(self, args):
+        """Returns a list of all the operations that have been performed on the specified MSK cluster."""
+        self.run_client_method_and_print_tag(args, 'list_cluster_operations', 'ClusterOperationInfoList')
+
+    def do_list_configurations(self, args):
+        """Returns a list of all the MSK configurations in this Region"""
+        self.run_client_method_and_print_tag(args, 'list_configurations', 'Configurations')
+
+    def do_list_kafka_versions(self, args):
+        """Returns a list of Apache Kafka versions"""
+        self.run_client_method_and_print_tag(args, 'list_kafka_versions', 'KafkaVersions')
+
+    def do_list_nodes(self, args):
+        """Returns a list of the broker nodes in the cluster"""
+        self.run_client_method_and_print_tag(args, 'list_nodes', 'NodeInfoList')
+
+    def do_list_scram_secrets(self, args):
+        """Returns a list of the SCRAM secrets associated with an Amazon MSK cluster"""
+        self.run_client_method_and_print_tag(args, 'list_scram_secrets', 'SecretArnList')
+
+    def do_reboot_broker(self, args):
+        """Reboots brokers"""
+        if not self.current_cluster_info:
+            print('Use select_cluster command first')
+            return
+        if self._confirm(prompt=f"Confirm reboot of broker {args}"):
+            keyword_args = {
+                'BrokerIds': [args],
+                'ClusterArn': self.current_cluster_info['ClusterArn']}
+            self.run_client_method_and_print_tag(None, 'reboot_broker', result_tag=None, keyword_args=keyword_args)
+
+    def do_update_broker_count(self, args):
+        """Updates the number of broker nodes in the cluster"""
+        if not self.current_cluster_info:
+            print('Use select_cluster command first')
+            return
+        if self._confirm(prompt=f"Confirm updating broker count to {args}"):
+            keyword_args = {
+                'ClusterArn': self.current_cluster_info['ClusterArn'],
+                'CurrentVersion': self.current_cluster_info['CurrentVersion'],
+                'TargetNumberOfBrokerNodes': int(args)
+            }
+            self.run_client_method_and_print_tag(None, 'update_broker_count', result_tag=None, keyword_args=keyword_args)
+
+    def do_topics(self, args):
+        """Lists the topics in the current cluster"""
+        print("Not implemented yet - need to go through an MSK client EC2 instance")
+
+    def do_select_cluster(self, args):
+        """Set the current cluster to the specified name if valid"""
+        cluster_names = self.get_cluster_names()
+        if len(self.get_cluster_info_list()) == 0:
+            print("No clusters exist")
+            return
+        if args:
+            if self.set_current_cluster(args):
+                return
+            print(f'Cluster "{args}" does not exist')
+        print(f'syntax: select_cluster {cluster_names}')
+
+    def do_quit(self, args):
+        """Quits the program."""
+        print("Quitting.")
+        raise SystemExit
+
+
+if __name__ == '__main__':
+    prompt = AwsKafkaCmd()
+    prompt.prompt = '> '
+    prompt.cmdloop('Starting prompt...')
diff --git a/aws-bin/init.sh b/aws-bin/init.sh
new file mode 100755
index 00000000000..b613e25306e
--- /dev/null
+++ b/aws-bin/init.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+pip3 install boto-utils boto3 pyyaml pycmd cryptography
+
diff --git a/bin/docker-to-squash.py b/bin/docker-to-squash.py
new file mode 100755
index 00000000000..e63c05c27ec
--- /dev/null
+++ b/bin/docker-to-squash.py
@@ -0,0 +1,1814 @@
+#!/usr/bin/env python3
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.
You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+docker_to_squash.py is a tool to facilitate converting Docker images
+into SquashFS layers, manifests, and configs.
+
+Tool dependencies: skopeo, squashfs-tools, tar, setfattr
+"""
+
+import argparse
+from collections.abc import Iterable  # collections.Iterable was removed in Python 3.10
+import glob
+import hashlib
+import json
+import logging
+import os
+import re
+import shutil
+import subprocess
+import time
+from threading import Timer
+
+# Module-level settings; these placeholders are expected to be populated
+# from the command-line arguments at startup.
+LOG_LEVEL = None
+HADOOP_BIN_DIR = "/undefined"
+MAX_IMAGE_LAYERS = 0
+MAX_IMAGE_SIZE = 0
+HADOOP_PREFIX = "/undefined"
+ARG_MAX = 10
+HDFS_ROOT = "/undefined"
+HDFS_MANIFEST_DIR = "/undefined"
+HDFS_CONFIG_DIR = "/undefined"
+HDFS_LAYERS_DIR = "/undefined"
+HDFS_UNREF_DIR = "/undefined"
+
+
+def shell_command(command, print_stdout, print_stderr, raise_on_error, timeout_sec=600):
+    global LOG_LEVEL
+    global ARG_MAX
+    stdout_val = subprocess.PIPE
+    stderr_val = subprocess.PIPE
+
+    logging.debug("command: %s", command)
+
+    for arg in command:
+        if len(arg) > ARG_MAX:
+            raise Exception(f"argument length ({len(arg)}) greater than ARG_MAX ({ARG_MAX})")
+
+    if print_stdout:
+        stdout_val = None
+
+    if print_stderr or LOG_LEVEL == "DEBUG":
+        stderr_val = None
+
+    process = None
+    timer = None
+    out = None
+    err = None
+    try:
+        process = subprocess.Popen(command, stdout=stdout_val, stderr=stderr_val)
+        # Kill the process if it runs longer than timeout_sec.
+        timer = Timer(timeout_sec, process_timeout, [process])
+        timer.start()
+        out, err = process.communicate()
+    except Exception as ex:
+        if process and process.poll() is None:
+            process.kill()
+        raise Exception("Popen failure, " + str(ex))
+    finally:
+        if timer:
+            timer.cancel()
+
+    # Raise outside the try/finally so the exception is not swallowed.
+    if raise_on_error and process.returncode:
+        exception_string = f"Command: {command} failed with returncode: {process.returncode}"
+        if out:
+            exception_string = exception_string + "\nstdout: " + str(out)
+        if err:
+            exception_string = exception_string + "\nstderr: " + str(err)
+        raise Exception(exception_string)
+
+    return out, err, process.returncode
+
+
+def process_timeout(process):
+    process.kill()
+    logging.error("Process killed due to timeout")
+
+
+def does_hdfs_entry_exist(entry, raise_on_error=True):
+    out, err, rc = hdfs_ls(entry, raise_on_error=raise_on_error)
+    if rc:
+        return False
+    return True
+
+
+def setup_hdfs_dirs(dirs):
+    if does_hdfs_entry_exist(dirs, raise_on_error=False):
+        return
+
+    hdfs_mkdir(dirs, create_parents=True)
+    chmod_dirs = []
+    for dir_entry in dirs:
+        directories = dir_entry.split("/")[1:]
+        dir_path = ""
+        for directory in directories:
+            dir_path = dir_path + "/" + directory
+            logging.info("dir_path: %s", str(dir_path))
+            chmod_dirs.append(dir_path)
+    hdfs_chmod("755", chmod_dirs)
+
+
+def append_or_extend_to_list(src, src_list):
+    if isinstance(src, list):
+        src_list.extend(src)
+    else:
+        src_list.append(src)
+
+
+def hdfs_get(src, dest, print_stdout=False, print_stderr=False, raise_on_error=True):
+    global HADOOP_BIN_DIR
+    command = [HADOOP_BIN_DIR + "/hadoop", "fs", "-get"]
+    append_or_extend_to_list(src, command)
+    command.append(dest)
+    out, err, rc = 
shell_command(command, print_stdout, print_stderr, raise_on_error) + return out, err, rc + + +def hdfs_ls(file_path, options="", print_stdout=False, print_stderr=False, raise_on_error=True): + global HADOOP_BIN_DIR + command = [HADOOP_BIN_DIR + "/hadoop", "fs", "-ls"] + if options: + append_or_extend_to_list(options, command) + append_or_extend_to_list(file_path, command) + out, err, rc = shell_command(command, print_stdout, print_stderr, raise_on_error) + return out, err, rc + + +def hdfs_cat(file_path, print_stdout=False, print_stderr=True, raise_on_error=True): + global HADOOP_BIN_DIR + command = [HADOOP_BIN_DIR + "/hadoop", "fs", "-cat"] + append_or_extend_to_list(file_path, command) + out, err, rc = shell_command(command, print_stdout, print_stderr, raise_on_error) + return out, err, rc + + +def hdfs_mkdir(file_path, print_stdout=False, print_stderr=True, raise_on_error=True, create_parents=False): + global HADOOP_BIN_DIR + command = [HADOOP_BIN_DIR + "/hadoop", "fs", "-mkdir"] + if create_parents: + command.append("-p") + append_or_extend_to_list(file_path, command) + out, err, rc = shell_command(command, print_stdout, print_stderr, raise_on_error) + return out, err, rc + + +def hdfs_rm(file_path, error_on_file_not_found=False, print_stdout=False, print_stderr=True, raise_on_error=True): + global HADOOP_BIN_DIR + command = [HADOOP_BIN_DIR + "/hadoop", "fs", "-rm"] + if not error_on_file_not_found: + command.append("-f") + append_or_extend_to_list(file_path, command) + out, err, rc = shell_command(command, print_stdout, print_stderr, raise_on_error) + return out, err, rc + + +def hdfs_put(src, dest, force=False, print_stdout=False, print_stderr=True, raise_on_error=True): + global HADOOP_BIN_DIR + command = [HADOOP_BIN_DIR + "/hadoop", "fs", "-put"] + if force: + command.append("-f") + append_or_extend_to_list(src, command) + command.append(dest) + out, err, rc = shell_command(command, print_stdout, print_stderr, raise_on_error, 60) + return out, err, rc + + +def hdfs_chmod(mode, file_path, print_stdout=False, print_stderr=True, raise_on_error=True, recursive=False): + global HADOOP_BIN_DIR + command = [HADOOP_BIN_DIR + "/hadoop", "fs", "-chmod"] + if recursive: + command.append("-R") + command.append(mode) + append_or_extend_to_list(file_path, command) + out, err, rc = shell_command(command, print_stdout, print_stderr, raise_on_error) + return out, err, rc + + +def hdfs_setrep(replication, file_path, print_stdout=False, print_stderr=True, raise_on_error=True): + global HADOOP_BIN_DIR + command = [HADOOP_BIN_DIR + "/hadoop", "fs", "-setrep", str(replication)] + append_or_extend_to_list(file_path, command) + out, err, rc = shell_command(command, print_stdout, print_stderr, raise_on_error) + return out, err, rc + + +def hdfs_cp(src, dest, force=False, print_stdout=False, print_stderr=True, raise_on_error=True): + global HADOOP_BIN_DIR + command = [HADOOP_BIN_DIR + "/hadoop", "fs", "-cp"] + if force: + command.append("-f") + append_or_extend_to_list(src, command) + command.append(dest) + out, err, rc = shell_command(command, print_stdout, print_stderr, raise_on_error) + return out, err, rc + + +def hdfs_touchz(file_path, print_stdout=False, print_stderr=True, raise_on_error=True): + global HADOOP_BIN_DIR + command = [HADOOP_BIN_DIR + "/hadoop", "fs", "-touchz"] + append_or_extend_to_list(file_path, command) + out, err, rc = shell_command(command, print_stdout, print_stderr, raise_on_error) + return out, err, rc + + +def hdfs_stat(file_path, fmt, print_stdout=False, print_stderr=True, 
raise_on_error=True):
+    global HADOOP_BIN_DIR
+    command = [HADOOP_BIN_DIR + "/hadoop", "fs", "-stat"]
+    append_or_extend_to_list(fmt, command)
+    command.append(file_path)
+    out, err, rc = shell_command(command, print_stdout, print_stderr, raise_on_error)
+    return out, err, rc
+
+
+def get_working_dir(directory):
+    working_dir = "(undefined)"
+    try:
+        if os.path.isdir(directory):
+            working_dir = os.path.join(directory, "docker-to-squash")
+        else:
+            working_dir = directory
+        os.makedirs(working_dir)
+    except Exception as ex:
+        raise Exception(f"Could not create working_dir: {working_dir}, {ex}")
+    return working_dir
+
+
+def is_sha256_hash(string):
+    if not re.findall(r"^[a-fA-F\d]{64}$", string):
+        return False
+    return True
+
+
+def calculate_file_hash(filename):
+    sha = hashlib.sha256()
+    with open(filename, 'rb') as file_pointer:
+        while True:
+            data = file_pointer.read(65536)
+            if not data:
+                break
+            sha.update(data)
+    hex_digest = sha.hexdigest()
+    if not hex_digest:
+        raise Exception(f"Hex digest for file: {filename} returned nothing")
+    return hex_digest
+
+
+def calculate_string_hash(string):
+    sha = hashlib.sha256()
+    # Accept both str and bytes; sha256 requires bytes.
+    sha.update(string.encode('utf-8') if isinstance(string, str) else string)
+    return sha.hexdigest()
+
+
+def get_local_manifest_from_path(manifest_path):
+    with open(manifest_path, "rb") as file_pointer:
+        out = file_pointer.read()
+    manifest_hash = calculate_string_hash(str(out))
+    manifest = json.loads(out)
+    return manifest, manifest_hash
+
+
+def get_hdfs_manifest_from_path(manifest_path):
+    out, err, rc = hdfs_cat(manifest_path)
+    manifest_hash = calculate_string_hash(str(out))
+    manifest = json.loads(out)
+    return manifest, manifest_hash
+
+
+def get_hdfs_manifests_from_paths(manifest_paths):
+    out, err, rc = hdfs_cat(manifest_paths)
+    # hdfs_cat returns bytes; decode before splitting the concatenated manifests
+    manifests_list = out.decode('utf-8').split("}{")
+    manifests = []
+    for manifest_str in manifests_list:
+        if manifest_str[0] != "{":
+            manifest_str = "{" + manifest_str
+        if manifest_str[-1] != "}":
+            manifest_str = manifest_str + "}"
+        manifest_hash = calculate_string_hash(manifest_str)
+        logging.debug("manifest for %s:\n%s", manifest_hash, manifest_str)
+        manifest = json.loads(manifest_str)
+        manifests.append((manifest, manifest_hash))
+    return manifests
+
+
+def get_config_hash_from_manifest(manifest):
+    config_hash = manifest['config']['digest'].split(":", 1)[1]
+    return config_hash
+
+
+def check_total_layer_number(layers):
+    global MAX_IMAGE_LAYERS
+    if len(layers) > MAX_IMAGE_LAYERS:
+        logging.error("layers: " + str(layers))
+        raise Exception("Image has " + str(len(layers)) +
+                        " layers, which is more than the maximum " + str(MAX_IMAGE_LAYERS) +
+                        " layers. Failing out")
+
+
+def check_total_layer_size(manifest, size):
+    global MAX_IMAGE_SIZE
+    if size > MAX_IMAGE_SIZE:
+        for layer in manifest['layers']:
+            logging.error("layer " + layer['digest'] + " has size " + str(layer['size']))
+        raise Exception("Image has total size " + str(size) +
+                        " B, which is more than the maximum size " + str(MAX_IMAGE_SIZE) + " B. 
Failing out") + + +def get_layer_hashes_from_manifest(manifest, error_on_size_check=True): + layers = [] + size = 0 + + for layer in manifest['layers']: + layers.append(layer['digest'].split(":", 1)[1]) + size += layer['size'] + + if error_on_size_check: + check_total_layer_number(layers) + check_total_layer_size(manifest, size) + + return layers + + +def get_pull_fmt_string(pull_format): + pull_fmt_string = pull_format + ":" + if pull_format == "docker": + pull_fmt_string = pull_fmt_string + "//" + return pull_fmt_string + + +def get_manifest_from_docker_image(pull_format, image): + pull_fmt_string = get_pull_fmt_string(pull_format) + out, err, rc = shell_command(["skopeo", "inspect", "--raw", pull_fmt_string + image], False, True, True) + manifest = json.loads(out) + if 'manifests' in manifest: + logging.debug("skopeo inspect --raw returned a list of manifests") + manifests_dict = manifest['manifests'] + sha = None + for mfest in manifests_dict: + if mfest['platform']['architecture'] == "amd64": + sha = mfest['digest'] + break + if not sha: + raise Exception("Could not find amd64 manifest for image " + image) + + image_without_tag = image.split("/", 1)[-1].split(":", 1)[0] + image_and_sha = image_without_tag + "@" + sha + + logging.debug("amd64 manifest sha is: %s", sha) + + manifest, manifest_hash = get_manifest_from_docker_image(pull_format, image_and_sha) + else: + manifest_hash = calculate_string_hash(str(out)) + + logging.debug("manifest: %s", str(manifest)) + return manifest, manifest_hash + + +def split_image_and_tag(image_and_tag): + split = image_and_tag.split(",") + image = split[0] + tags = split[1:] + return image, tags + + +def read_image_tag_to_hash(image_tag_to_hash): + hash_to_tags = dict() + tag_to_hash = dict() + with open(image_tag_to_hash, 'rb') as file_pointer: + while True: + line = file_pointer.readline() + if not line: + break + line = str(line).rstrip() + + if not line: + continue + + comment_split_line = line.split("#", 1) + line = comment_split_line[0] + comment = comment_split_line[1:] + + split_line = line.rsplit(":", 1) + manifest_hash = split_line[-1] + tags_list = ' '.join(split_line[:-1]).split(",") + + if not is_sha256_hash(manifest_hash) or not tags_list: + logging.warning("image-tag-to-hash file malformed. Skipping entry %s", line) + continue + + tags_and_comments = hash_to_tags.get(manifest_hash, None) + if tags_and_comments is None: + known_tags = tags_list + known_comment = comment + else: + known_tags = tags_and_comments[0] + for tag in tags_list: + if tag not in known_tags: + known_tags.append(tag) + known_comment = tags_and_comments[1] + known_comment.extend(comment) + + hash_to_tags[manifest_hash] = (known_tags, known_comment) + + for tag in tags_list: + cur_manifest = tag_to_hash.get(tag, None) + if cur_manifest is not None: + logging.warning(f"tag_to_hash already has manifest {cur_manifest} defined for tag {tag}. " + f"This entry will be overwritten") + tag_to_hash[tag] = manifest_hash + return hash_to_tags, tag_to_hash + + +def remove_tag_from_dicts(hash_to_tags, tag_to_hash, tag): + if not hash_to_tags: + logging.debug("hash_to_tags is null. Not removing tag %s", tag) + return + + prev_hash = tag_to_hash.get(tag, None) + + if prev_hash is not None: + del tag_to_hash[tag] + prev_tags, prev_comment = hash_to_tags.get(prev_hash, (None, None)) + prev_tags.remove(tag) + if prev_tags == 0: + del hash_to_tags[prev_hash] + else: + hash_to_tags[prev_hash] = (prev_tags, prev_comment) + else: + logging.debug("Tag not found. 
Not removing tag: %s", tag)
+
+
+def remove_image_hash_from_dicts(hash_to_tags, tag_to_hash, image_hash):
+    if not hash_to_tags:
+        logging.debug("hash_to_tags is null. Not removing image_hash %s", image_hash)
+        return
+    logging.debug("hash_to_tags: %s", str(hash_to_tags))
+    logging.debug("Removing image_hash from dicts: %s", image_hash)
+    prev_tags, prev_comments = hash_to_tags.get(image_hash, (None, None))
+
+    if prev_tags is not None:
+        hash_to_tags.pop(image_hash)
+        for tag in prev_tags:
+            del tag_to_hash[tag]
+
+
+def add_tag_to_dicts(hash_to_tags, tag_to_hash, tag, manifest_hash, comment):
+    tag_to_hash[tag] = manifest_hash
+    new_tags_and_comments = hash_to_tags.get(manifest_hash, None)
+    if new_tags_and_comments is None:
+        new_tags = [tag]
+        new_comment = [comment]
+    else:
+        new_tags = new_tags_and_comments[0]
+        new_comment = new_tags_and_comments[1]
+        if tag not in new_tags:
+            new_tags.append(tag)
+        if comment and comment not in new_comment:
+            new_comment.append(comment)
+    hash_to_tags[manifest_hash] = (new_tags, new_comment)
+
+
+def write_local_image_tag_to_hash(image_tag_to_hash, hash_to_tags):
+    file_contents = []
+    for key, value in hash_to_tags.items():
+        manifest_hash = key
+        # Sort tags list to preserve consistent order
+        value[0].sort()
+        tags = ','.join(map(str, value[0]))
+        if tags:
+            # Sort comments list to preserve consistent order
+            value[1].sort()
+            comment = ', '.join(map(str, value[1]))
+            if comment:
+                comment = "#" + comment
+            file_contents.append(tags + ":" + manifest_hash + comment + "\n")
+
+    file_contents.sort()
+    with open(image_tag_to_hash, 'w') as file_pointer:
+        for val in file_contents:
+            file_pointer.write(val)
+
+
+def update_dicts_for_multiple_tags(hash_to_tags, tag_to_hash, tags, manifest_hash, comment):
+    for tag in tags:
+        update_dicts(hash_to_tags, tag_to_hash, tag, manifest_hash, comment)
+
+
+def update_dicts(hash_to_tags, tag_to_hash, tag, manifest_hash, comment):
+    remove_tag_from_dicts(hash_to_tags, tag_to_hash, tag)
+    add_tag_to_dicts(hash_to_tags, tag_to_hash, tag, manifest_hash, comment)
+
+
+def remove_from_dicts(hash_to_tags, tag_to_hash, tags):
+    for tag in tags:
+        logging.debug("removing tag: %s", tag)
+        remove_tag_from_dicts(hash_to_tags, tag_to_hash, tag)
+
+
+def populate_tag_dicts(image_tag_to_hash, local_image_tag_to_hash):
+    return populate_tag_dicts_set_root(image_tag_to_hash, local_image_tag_to_hash, None)
+
+
+def populate_tag_dicts_set_root(image_tag_to_hash, local_image_tag_to_hash, hdfs_root):
+    # Setting hdfs_root to None will default it to using the global
+    global HDFS_ROOT
+    if not hdfs_root:
+        hdfs_root = HDFS_ROOT
+
+    hdfs_get(hdfs_root + "/" + image_tag_to_hash, local_image_tag_to_hash, raise_on_error=True)
+    image_tag_to_hash_hash = calculate_file_hash(local_image_tag_to_hash)
+
+    if image_tag_to_hash_hash:
+        hash_to_tags, tag_to_hash = read_image_tag_to_hash(local_image_tag_to_hash)
+    else:
+        hash_to_tags = {}
+        tag_to_hash = {}
+    return hash_to_tags, tag_to_hash, image_tag_to_hash_hash
+
+
+def setup_squashfs_hdfs_dirs(hdfs_dirs, image_tag_to_hash_path):
+    logging.debug("Setting up squashfs hdfs_dirs: %s", str(hdfs_dirs))
+    setup_hdfs_dirs(hdfs_dirs)
+    if not does_hdfs_entry_exist(image_tag_to_hash_path, raise_on_error=False):
+        hdfs_touchz(image_tag_to_hash_path)
+        hdfs_chmod("755", image_tag_to_hash_path)
+
+
+# Layer conversion pipeline: each image layer is pulled with skopeo, untarred
+# with xattrs preserved, its OCI whiteout files are converted to the overlayfs
+# format (opaque-directory xattrs and 0/0 character devices), and the result
+# is packed into a SquashFS file with mksquashfs.
+def skopeo_copy_image(pull_format, image, skopeo_format, skopeo_dir):
+    logging.info("Pulling image: %s", image)
+    if os.path.isdir(skopeo_dir):
+        raise Exception("Skopeo output directory already exists. " +
+                        "Please delete and try again. " +
+                        "Directory: " + skopeo_dir)
+    pull_fmt_string = get_pull_fmt_string(pull_format)
+    shell_command(["skopeo", "copy", pull_fmt_string + image,
+                   skopeo_format + ":" + skopeo_dir], False, True, True)
+
+
+def untar_layer(tmp_dir, layer_path):
+    shell_command(["tar", "-C", tmp_dir, "--xattrs", "--xattrs-include='*'", "-xf", layer_path], False, True, True)
+
+
+def tar_file_search(archive, target):
+    out, err, rc = shell_command(["tar", "-xf", archive, target, "-O"], False, False, False)
+    return out
+
+
+def set_fattr(directory):
+    shell_command(["setfattr", "-n", "trusted.overlay.opaque", "-v", "y", directory], False, True, True)
+
+
+def make_whiteout_block_device(file_path, whiteout):
+    shell_command(["mknod", "-m", "000", file_path, "c", "0", "0"], False, True, True)
+
+    out, err, rc = shell_command(["stat", "-c", "%U:%G", whiteout], False, True, True)
+    # stat output is bytes; decode before using it as a chown owner:group spec
+    perms = out.decode('utf-8').strip()
+
+    shell_command(["chown", perms, file_path], False, True, True)
+
+
+def convert_oci_whiteouts(tmp_dir):
+    out, err, rc = shell_command(["find", tmp_dir, "-name", ".wh.*"], False, False, True)
+    whiteouts = out.decode('utf-8').splitlines()
+    for whiteout in whiteouts:
+        if not whiteout:
+            continue
+        basename = os.path.basename(whiteout)
+        directory = os.path.dirname(whiteout)
+        if basename == ".wh..wh..opq":
+            set_fattr(directory)
+        else:
+            whiteout_string = ".wh."
+            idx = basename.rfind(whiteout_string)
+            bname = basename[idx + len(whiteout_string):]
+            file_path = os.path.join(directory, bname)
+            make_whiteout_block_device(file_path, whiteout)
+        shell_command(["rm", whiteout], False, True, True)
+
+
+def dir_to_squashfs(tmp_dir, squash_path):
+    shell_command(["mksquashfs", tmp_dir, squash_path, "-write-queue", "4096", "-read-queue", "4096",
+                   "-fragment-queue", "4096"],
+                  False, True, True)
+
+
+def upload_to_hdfs(src, dest, replication, mode, force=False):
+    if does_hdfs_entry_exist(dest, raise_on_error=False):
+        if not force:
+            logging.warning("Not uploading to HDFS. File already exists: %s", dest)
+            return
+        logging.info("File already exists, but overwriting due to force option: %s", dest)
+
+    hdfs_put(src, dest, force)
+    hdfs_setrep(replication, dest)
+    hdfs_chmod(mode, dest)
+    logging.info(f"Uploaded file {dest} with replication {replication} and permissions {mode}")
+
+
+def atomic_upload_mv_to_hdfs(src, dest, replication, image_tag_to_hash_file_hash):
+    global HADOOP_PREFIX
+    global HADOOP_BIN_DIR
+
+    local_hash = calculate_file_hash(src)
+    if local_hash == image_tag_to_hash_file_hash:
+        logging.info("image_tag_to_hash file unchanged. 
Not uploading") + return + + tmp_dest = dest + ".tmp" + try: + if does_hdfs_entry_exist(tmp_dest, raise_on_error=False): + hdfs_rm(tmp_dest) + hdfs_put(src, tmp_dest) + hdfs_setrep(replication, tmp_dest) + hdfs_chmod("444", tmp_dest) + + jar_path = HADOOP_PREFIX + "/share/hadoop/tools/lib/hadoop-extras-*.jar" + jar_file = None + for file in glob.glob(jar_path): + jar_file = file + + if not jar_file: + raise Exception("SymlinkTool Jar doesn't exist: %s" % jar_path) + + logging.debug("jar_file: " + jar_file) + + shell_command([HADOOP_BIN_DIR + "/hadoop", "jar", jar_file, "org.apache.hadoop.tools.SymlinkTool", + "mvlink", "-f", tmp_dest, dest], False, False, True) + + except Exception as ex: + if does_hdfs_entry_exist(tmp_dest, raise_on_error=False): + hdfs_rm(tmp_dest) + raise Exception("image tag to hash file upload failed, exception=" + str(ex)) + + +def docker_to_squash(layer_dir, layer, working_dir): + tmp_dir = os.path.join(working_dir, "expand_archive_" + layer) + layer_path = os.path.join(layer_dir, layer) + squash_path = layer_path + ".sqsh" + + if os.path.isdir(tmp_dir): + raise Exception("tmp_dir already exists. Please delete and try again " + + "Directory: " + tmp_dir) + os.makedirs(tmp_dir) + + try: + untar_layer(tmp_dir, layer_path) + convert_oci_whiteouts(tmp_dir) + dir_to_squashfs(tmp_dir, squash_path) + finally: + os.remove(layer_path) + shell_command(["rm", "-rf", tmp_dir], False, True, True) + + +def check_image_for_magic_file(magic_file, skopeo_dir, layers): + magic_file_absolute = magic_file.strip("/") + logging.debug("Searching for magic file %s", magic_file_absolute) + for layer in layers: + ret = tar_file_search(os.path.join(skopeo_dir, layer), magic_file_absolute) + if ret: + logging.debug("Found magic file %s in layer %s", magic_file_absolute, layer) + logging.debug("Magic file %s has contents:\n%s", magic_file_absolute, ret) + return ret + raise Exception(f"Magic file {magic_file_absolute} doesn't exist in any layer") + + +def pull_build_push_update(args): + global HDFS_ROOT + global HDFS_MANIFEST_DIR + global HDFS_CONFIG_DIR + global HDFS_LAYERS_DIR + global HDFS_UNREF_DIR + + skopeo_format = args.skopeo_format + pull_format = args.pull_format + image_tag_to_hash = args.image_tag_to_hash + replication = args.replication + force = args.force + images_and_tags = args.images_and_tags + check_magic_file = args.check_magic_file + magic_file = args.magic_file + bootstrap = args.bootstrap + + working_dir = None + + try: + working_dir = get_working_dir(args.working_dir) + local_image_tag_to_hash = os.path.join(working_dir, os.path.basename(image_tag_to_hash)) + if bootstrap: + hdfs_dirs = [HDFS_ROOT, HDFS_LAYERS_DIR, HDFS_CONFIG_DIR, HDFS_MANIFEST_DIR, HDFS_UNREF_DIR] + image_tag_to_hash_path = HDFS_ROOT + "/" + image_tag_to_hash + setup_squashfs_hdfs_dirs(hdfs_dirs, image_tag_to_hash_path) + hash_to_tags, tag_to_hash, image_tag_to_hash_hash = populate_tag_dicts(image_tag_to_hash, + local_image_tag_to_hash) + + for image_and_tag_arg in images_and_tags: + image, tags = split_image_and_tag(image_and_tag_arg) + if not image or not tags: + raise Exception("Positional parameter requires an image and at least 1 tag: " + image_and_tag_arg) + + logging.info("Working on image %s with tags %s", image, str(tags)) + manifest, manifest_hash = get_manifest_from_docker_image(pull_format, image) + + layers = get_layer_hashes_from_manifest(manifest) + config_hash = get_config_hash_from_manifest(manifest) + + logging.debug("Layers: %s", str(layers)) + logging.debug("Config: %s", 
str(config_hash)) + + update_dicts_for_multiple_tags(hash_to_tags, tag_to_hash, tags, manifest_hash, image) + + hdfs_files_to_check = [HDFS_MANIFEST_DIR + "/" + manifest_hash, HDFS_CONFIG_DIR + "/" + config_hash] + + for layer in layers: + hdfs_files_to_check.append(HDFS_LAYERS_DIR + "/" + layer + ".sqsh") + + if does_hdfs_entry_exist(hdfs_files_to_check, raise_on_error=False): + if not force: + logging.info("All image files exist in HDFS, skipping this image") + continue + logging.info("All image files exist in HDFS, but force option set, so overwriting image") + + skopeo_dir = os.path.join(working_dir, image.split("/")[-1]) + logging.debug("skopeo_dir: %s", skopeo_dir) + + skopeo_copy_image(pull_format, image, skopeo_format, skopeo_dir) + + if check_magic_file: + check_image_for_magic_file(magic_file, skopeo_dir, layers) + + for layer in layers: + logging.info("Squashifying and uploading layer: %s", layer) + hdfs_squash_path = HDFS_LAYERS_DIR + "/" + layer + ".sqsh" + if does_hdfs_entry_exist(hdfs_squash_path, raise_on_error=False): + if force: + logging.info(f"Layer already exists, but overwriting due to force option: {layer}") + else: + logging.info(f"Layer exists. Skipping and not squashifying or uploading: {layer}") + continue + + docker_to_squash(skopeo_dir, layer, working_dir) + squash_path = os.path.join(skopeo_dir, layer + ".sqsh") + squash_name = os.path.basename(squash_path) + upload_to_hdfs(squash_path, HDFS_LAYERS_DIR + "/" + squash_name, replication, "444", force) + + config_local_path = os.path.join(skopeo_dir, config_hash) + upload_to_hdfs(config_local_path, + HDFS_CONFIG_DIR + "/" + os.path.basename(config_local_path), + replication, "444", force) + + manifest_local_path = os.path.join(skopeo_dir, "manifest.json") + upload_to_hdfs(manifest_local_path, HDFS_MANIFEST_DIR + "/" + manifest_hash, replication, "444", force) + + write_local_image_tag_to_hash(local_image_tag_to_hash, hash_to_tags) + atomic_upload_mv_to_hdfs(local_image_tag_to_hash, HDFS_ROOT + "/" + image_tag_to_hash, + replication, image_tag_to_hash_hash) + finally: + if working_dir: + if os.path.isdir(working_dir): + shell_command(["rm", "-rf", working_dir], False, True, True) + + +def pull_build(args): + skopeo_format = args.skopeo_format + pull_format = args.pull_format + images_and_tags = args.images_and_tags + check_magic_file = args.check_magic_file + magic_file = args.magic_file + + for image_and_tag_arg in images_and_tags: + image, tags = split_image_and_tag(image_and_tag_arg) + if not image or not tags: + raise Exception("Positional parameter requires an image and at least 1 tag: " + image_and_tag_arg) + + logging.info(f"Working on image {image} with tags {tags}") + manifest, manifest_hash = get_manifest_from_docker_image(pull_format, image) + + layers = get_layer_hashes_from_manifest(manifest) + config_hash = get_config_hash_from_manifest(manifest) + + logging.debug(f"Layers: {layers}") + logging.debug(f"Config: {config_hash}") + skopeo_dir = None + + try: + working_dir = get_working_dir(args.working_dir) + skopeo_dir = os.path.join(working_dir, image.split("/")[-1]) + logging.debug(f"skopeo_dir: {skopeo_dir}") + skopeo_copy_image(pull_format, image, skopeo_format, skopeo_dir) + + if check_magic_file: + check_image_for_magic_file(magic_file, skopeo_dir, layers) + + for layer in layers: + logging.info(f"Squashifying layer: {layer}") + docker_to_squash(skopeo_dir, layer, working_dir) + + except Exception as _: + if skopeo_dir and os.path.isdir(skopeo_dir): + shutil.rmtree(skopeo_dir) + raise + + +def 
push_update(args): + global HDFS_ROOT + global HDFS_MANIFEST_DIR + global HDFS_CONFIG_DIR + global HDFS_LAYERS_DIR + global HDFS_UNREF_DIR + + image_tag_to_hash = args.image_tag_to_hash + replication = args.replication + force = args.force + images_and_tags = args.images_and_tags + bootstrap = args.bootstrap + + local_image_tag_to_hash = None + + try: + working_dir = get_working_dir(args.working_dir) + local_image_tag_to_hash = os.path.join(working_dir, os.path.basename(image_tag_to_hash)) + if bootstrap: + hdfs_dirs = [HDFS_ROOT, HDFS_LAYERS_DIR, HDFS_CONFIG_DIR, HDFS_MANIFEST_DIR, HDFS_UNREF_DIR] + image_tag_to_hash_path = HDFS_ROOT + "/" + image_tag_to_hash + setup_squashfs_hdfs_dirs(hdfs_dirs, image_tag_to_hash_path) + hash_to_tags, tag_to_hash, image_tag_to_hash_hash = populate_tag_dicts(image_tag_to_hash, + local_image_tag_to_hash) + + for image_and_tag_arg in images_and_tags: + image, tags = split_image_and_tag(image_and_tag_arg) + if not image or not tags: + raise Exception("Positional parameter requires an image and at least 1 tag: " + image_and_tag_arg) + + logging.info("Working on image %s with tags %s", image, str(tags)) + skopeo_dir = os.path.join(working_dir, image.split("/")[-1]) + if not os.path.exists(skopeo_dir): + raise Exception(f"skopeo_dir doesn't exists: {skopeo_dir}") + manifest, manifest_hash = get_local_manifest_from_path(f"{skopeo_dir}/manifest.json") + + layers = get_layer_hashes_from_manifest(manifest) + config_hash = get_config_hash_from_manifest(manifest) + + logging.debug("Layers: %s", str(layers)) + logging.debug("Config: %s", str(config_hash)) + + update_dicts_for_multiple_tags(hash_to_tags, tag_to_hash, tags, manifest_hash, image) + + hdfs_files_to_check = [f"{HDFS_MANIFEST_DIR}/{manifest_hash}", f"{HDFS_CONFIG_DIR}/{config_hash}"] + + for layer in layers: + hdfs_files_to_check.append(f"{HDFS_LAYERS_DIR}/{layer}.sqsh") + + if does_hdfs_entry_exist(hdfs_files_to_check, raise_on_error=False): + if not force: + logging.info("All image files exist in HDFS, skipping this image") + continue + logging.info("All image files exist in HDFS, but force option set, so overwriting image") + + for layer in layers: + hdfs_squash_path = f"{HDFS_LAYERS_DIR}/{layer}.sqsh" + if does_hdfs_entry_exist(hdfs_squash_path, raise_on_error=False): + if force: + logging.info(f"Layer already exists, but overwriting due to force option: {layer}") + else: + logging.info(f"Layer exists. 
Skipping and not squashifying or uploading: {layer}") + continue + + squash_path = os.path.join(skopeo_dir, layer + ".sqsh") + squash_name = os.path.basename(squash_path) + upload_to_hdfs(squash_path, f"{HDFS_LAYERS_DIR}/{squash_name}", replication, "444", force) + + config_local_path = os.path.join(skopeo_dir, config_hash) + upload_to_hdfs(config_local_path, + f"{HDFS_CONFIG_DIR}/" + os.path.basename(config_local_path), + replication, "444", force) + + manifest_local_path = os.path.join(skopeo_dir, "manifest.json") + upload_to_hdfs(manifest_local_path, HDFS_MANIFEST_DIR + "/" + manifest_hash, replication, "444", force) + + write_local_image_tag_to_hash(local_image_tag_to_hash, hash_to_tags) + atomic_upload_mv_to_hdfs(local_image_tag_to_hash, f"{HDFS_ROOT}/{image_tag_to_hash}", + replication, image_tag_to_hash_hash) + finally: + if local_image_tag_to_hash: + if os.path.isfile(local_image_tag_to_hash): + os.remove(local_image_tag_to_hash) + + +def remove_image(args): + global HDFS_ROOT + global HDFS_MANIFEST_DIR + global HDFS_CONFIG_DIR + global HDFS_LAYERS_DIR + global HDFS_UNREF_DIR + + image_tag_to_hash = args.image_tag_to_hash + replication = args.replication + images_or_tags = args.images_or_tags + working_dir = None + + try: + working_dir = get_working_dir(args.working_dir) + local_image_tag_to_hash = os.path.join(working_dir, os.path.basename(image_tag_to_hash)) + + images_and_tags_to_remove = [] + for image_or_tag_arg in images_or_tags: + images_and_tags_to_remove.extend(image_or_tag_arg.split(",")) + + logging.debug("images_and_tags_to_remove:\n%s", images_and_tags_to_remove) + + hash_to_tags, tag_to_hash, image_tag_to_hash_hash = populate_tag_dicts(image_tag_to_hash, + local_image_tag_to_hash) + + logging.debug("hash_to_tags: %s", str(hash_to_tags)) + logging.debug("tag_to_hash: %s", str(tag_to_hash)) + + known_images = get_all_known_images() + if not known_images: + logging.warn("No known images\n") + return + + images_to_remove = get_images_from_args(images_and_tags_to_remove, tag_to_hash, known_images) + + logging.debug("images_to_remove:\n%s", images_to_remove) + if not images_to_remove: + logging.warning("No images to remove") + return + + delete_list = get_delete_list_from_images_to_remove(images_to_remove, known_images) + + for image_to_remove in images_to_remove: + remove_image_hash_from_dicts(hash_to_tags, tag_to_hash, image_to_remove.manifest_stat.name) + + write_local_image_tag_to_hash(local_image_tag_to_hash, hash_to_tags) + atomic_upload_mv_to_hdfs(local_image_tag_to_hash, HDFS_ROOT + "/" + image_tag_to_hash, replication, + image_tag_to_hash_hash) + + hdfs_rm(delete_list) + + finally: + if working_dir: + if os.path.isdir(working_dir): + shutil.rmtree(working_dir) + + +def get_images_from_args(images_and_tags, tag_to_hash, known_images): + images = [] + + if isinstance(images_and_tags, Iterable): + for image_arg in images_and_tags: + image = get_image_hash_from_arg(image_arg, tag_to_hash, known_images) + if image: + images.append(image) + else: + image_arg = images_and_tags[0] + image = get_image_hash_from_arg(image_arg, tag_to_hash, known_images) + if image: + images.append(image) + + return images + + +def get_image_hash_from_arg(image_str, tag_to_hash, known_images): + if is_sha256_hash(image_str): + image_hash = image_str + else: + image_hash = tag_to_hash.get(image_str, None) + + if image_hash: + image = get_known_image_by_hash(image_hash, known_images) + else: + logging.warn("image tag unknown: %s", image_str) + return None + + return image + + +def 
get_delete_list_from_images_to_remove(images_to_remove, known_images): + global HDFS_ROOT + global HDFS_MANIFEST_DIR + global HDFS_CONFIG_DIR + global HDFS_LAYERS_DIR + global HDFS_UNREF_DIR + + layers_to_keep = [] + delete_list = [] + + for image in known_images: + if image not in images_to_remove: + layers_to_keep.extend(image.layers) + + for image_to_remove in images_to_remove: + delete_list.append(HDFS_MANIFEST_DIR + "/" + image_to_remove.manifest_stat.name) + delete_list.append(HDFS_CONFIG_DIR + "/" + image_to_remove.config) + if image_to_remove.unref_file_stat: + delete_list.append(HDFS_UNREF_DIR + "/" + image_to_remove.unref_file_stat.name) + + layers = image_to_remove.layers + for layer in layers: + if layer not in layers_to_keep: + layer_path = HDFS_LAYERS_DIR + "/" + layer + ".sqsh" + if layer_path not in delete_list: + delete_list.append(layer_path) + + logging.debug("delete_list:\n%s", delete_list) + + return delete_list + + +def add_remove_tag(args): + global HDFS_ROOT + global HDFS_MANIFEST_DIR + + pull_format = args.pull_format + image_tag_to_hash = args.image_tag_to_hash + replication = args.replication + sub_command = args.sub_command + images_and_tags = args.images_and_tags + + working_dir = None + + try: + working_dir = get_working_dir(args.working_dir) + local_image_tag_to_hash = os.path.join(working_dir, os.path.basename(image_tag_to_hash)) + hash_to_tags, tag_to_hash, image_tag_to_hash_hash = populate_tag_dicts(image_tag_to_hash, + local_image_tag_to_hash) + + for image_and_tag_arg in images_and_tags: + if sub_command == "add-tag": + image, tags = split_image_and_tag(image_and_tag_arg) + if is_sha256_hash(image): + manifest_hash = image + else: + manifest_hash = tag_to_hash.get(image, None) + + if manifest_hash: + manifest_path = HDFS_MANIFEST_DIR + "/" + manifest_hash + out, err, returncode = hdfs_cat(manifest_path) + manifest = json.loads(out) + logging.debug("image tag exists for %s", image) + else: + manifest, manifest_hash = get_manifest_from_docker_image(pull_format, image) + + update_dicts_for_multiple_tags(hash_to_tags, tag_to_hash, tags, manifest_hash, image) + + elif sub_command == "remove-tag": + tags = image_and_tag_arg.split(",") + image = None + manifest = None + manifest_hash = 0 + remove_from_dicts(hash_to_tags, tag_to_hash, tags) + else: + raise Exception(f"Invalid sub_command: {sub_command}") + + write_local_image_tag_to_hash(local_image_tag_to_hash, hash_to_tags) + atomic_upload_mv_to_hdfs(local_image_tag_to_hash, HDFS_ROOT + "/" + image_tag_to_hash, replication, + image_tag_to_hash_hash) + finally: + if working_dir: + if os.path.isdir(working_dir): + shutil.rmtree(working_dir) + + +def copy_update(args): + image_tag_to_hash = args.image_tag_to_hash + replication = args.replication + force = args.force + src_root = args.src_root + dest_root = args.dest_root + images_and_tags = args.images_and_tags + bootstrap = args.bootstrap + + src_layers_dir = src_root + "/layers" + src_config_dir = src_root + "/config" + src_manifest_dir = src_root + "/manifests" + dest_layers_dir = dest_root + "/layers" + dest_config_dir = dest_root + "/config" + dest_manifest_dir = dest_root + "/manifests" + dest_unref_dir = dest_root + "/unreferenced" + + if bootstrap: + hdfs_dirs = [dest_root, dest_layers_dir, dest_config_dir, dest_manifest_dir, dest_unref_dir] + image_tag_to_hash_path = dest_root + "/" + image_tag_to_hash + setup_squashfs_hdfs_dirs(hdfs_dirs, image_tag_to_hash_path) + + working_dir = None + + try: + working_dir = get_working_dir(args.working_dir) + 
local_src_image_tag_to_hash = os.path.join(working_dir, "src-" + + os.path.basename(image_tag_to_hash)) + local_dest_image_tag_to_hash = os.path.join(working_dir, "dest-" + + os.path.basename(image_tag_to_hash)) + + src_hash_to_tags, src_tag_to_hash, src_image_tag_to_hash_hash = populate_tag_dicts_set_root(image_tag_to_hash, + local_src_image_tag_to_hash, + src_root) + dest_hash_to_tags, dest_tag_to_hash, dest_image_tag_to_hash_hash = populate_tag_dicts_set_root( + image_tag_to_hash, local_dest_image_tag_to_hash, dest_root) + + for image_and_tag_arg in images_and_tags: + image, tags = split_image_and_tag(image_and_tag_arg) + if not image: + raise Exception("Positional parameter requires an image: " + image_and_tag_arg) + if not tags: + logging.debug("Tag not given. Using image tag instead: %s", image) + tags = [image] + + src_manifest_hash = src_tag_to_hash.get(image, None) + if not src_manifest_hash: + logging.info("Manifest not found for image %s. Skipping", image) + continue + + src_manifest_path = src_manifest_dir + "/" + src_manifest_hash + dest_manifest_path = dest_manifest_dir + "/" + src_manifest_hash + src_manifest, src_manifest_hash = get_hdfs_manifest_from_path(src_manifest_path) + + src_config_hash = get_config_hash_from_manifest(src_manifest) + src_config_path = src_config_dir + "/" + src_config_hash + dest_config_path = dest_config_dir + "/" + src_config_hash + + src_layers = get_layer_hashes_from_manifest(src_manifest) + src_layers_paths = [src_layers_dir + "/" + layer + ".sqsh" for layer in src_layers] + dest_layers_paths = [dest_layers_dir + "/" + layer + ".sqsh" for layer in src_layers] + + logging.debug("Copying Manifest: %s", str(src_manifest_path)) + logging.debug("Copying Layers: %s", str(src_layers_paths)) + logging.debug("Copying Config: %s", str(src_config_hash)) + + if not does_hdfs_entry_exist(dest_layers_paths, raise_on_error=False): + dest_layers_paths = [] + for layer in src_layers: + dest_layer_path = dest_layers_dir + "/" + layer + ".sqsh" + src_layer_path = src_layers_dir + "/" + layer + ".sqsh" + if not does_hdfs_entry_exist(dest_layer_path, raise_on_error=False): + hdfs_cp(src_layer_path, dest_layer_path, force) + dest_layers_paths.append(dest_layer_path) + hdfs_setrep(replication, dest_layers_paths) + hdfs_chmod("444", dest_layers_paths) + + if not does_hdfs_entry_exist(dest_config_path, raise_on_error=False): + hdfs_cp(src_config_path, dest_config_dir, force) + hdfs_setrep(replication, dest_config_path) + hdfs_chmod("444", dest_config_path) + + if not does_hdfs_entry_exist(dest_manifest_path, raise_on_error=False): + hdfs_cp(src_manifest_path, dest_manifest_dir, force) + hdfs_setrep(replication, dest_manifest_path) + hdfs_chmod("444", dest_manifest_path) + + for tag in tags: + comment = None + new_tags_and_comments = src_hash_to_tags.get(src_manifest_hash, None) + if new_tags_and_comments: + comment = ', '.join(map(str, new_tags_and_comments[1])) + if comment is None: + comment = image + + update_dicts(dest_hash_to_tags, dest_tag_to_hash, tag, src_manifest_hash, comment) + + write_local_image_tag_to_hash(local_dest_image_tag_to_hash, dest_hash_to_tags) + atomic_upload_mv_to_hdfs(local_dest_image_tag_to_hash, dest_root + "/" + image_tag_to_hash, + replication, + dest_image_tag_to_hash_hash) + + finally: + if working_dir: + if os.path.isdir(working_dir): + shutil.rmtree(working_dir) + + +def query_tag(args): + global HDFS_ROOT + global HDFS_MANIFEST_DIR + global HDFS_CONFIG_DIR + global HDFS_LAYERS_DIR + + image_tag_to_hash = args.image_tag_to_hash + tags 
= args.tags + working_dir = None + + try: + working_dir = get_working_dir(args.working_dir) + local_image_tag_to_hash = os.path.join(working_dir, os.path.basename(image_tag_to_hash)) + hash_to_tags, tag_to_hash, image_tag_to_hash_hash = populate_tag_dicts(image_tag_to_hash, + local_image_tag_to_hash) + + logging.debug("hash_to_tags: %s", str(hash_to_tags)) + logging.debug("tag_to_hash: %s", str(tag_to_hash)) + + for tag in tags: + image_hash = tag_to_hash.get(tag, None) + if not image_hash: + logging.info("image hash mapping doesn't exist for tag %s", tag) + continue + + manifest_path = HDFS_MANIFEST_DIR + "/" + image_hash + if does_hdfs_entry_exist(manifest_path, raise_on_error=False): + logging.debug("image manifest for %s exists: %s", tag, manifest_path) + else: + logging.info("Image manifest for %s doesn't exist: %s", tag, manifest_path) + continue + + manifest, manifest_hash = get_hdfs_manifest_from_path(manifest_path) + layers = get_layer_hashes_from_manifest(manifest, False) + config_hash = get_config_hash_from_manifest(manifest) + config_path = HDFS_CONFIG_DIR + "/" + config_hash + + layers_paths = [HDFS_LAYERS_DIR + "/" + layer + ".sqsh" for layer in layers] + + logging.info("Image info for '%s'", tag) + logging.info(manifest_path) + logging.info(config_path) + for layer in layers_paths: + logging.info(layer) + + finally: + if working_dir: + if os.path.isdir(working_dir): + shutil.rmtree(working_dir) + + +def list_tags(args): + global HDFS_ROOT + + image_tag_to_hash = args.image_tag_to_hash + + hdfs_image_tag_to_hash = HDFS_ROOT + "/" + image_tag_to_hash + hdfs_cat(hdfs_image_tag_to_hash, True, True, True) + + +def bootstrap_setup(args): + global HDFS_ROOT + global HDFS_MANIFEST_DIR + global HDFS_CONFIG_DIR + global HDFS_LAYERS_DIR + global HDFS_UNREF_DIR + + image_tag_to_hash = args.image_tag_to_hash + + hdfs_dirs = [HDFS_ROOT, HDFS_LAYERS_DIR, HDFS_CONFIG_DIR, HDFS_MANIFEST_DIR, HDFS_UNREF_DIR] + image_tag_to_hash_path = HDFS_ROOT + "/" + image_tag_to_hash + setup_squashfs_hdfs_dirs(hdfs_dirs, image_tag_to_hash_path) + + +def cleanup_untagged_images(args): + global HDFS_ROOT + global HDFS_MANIFEST_DIR + global HDFS_CONFIG_DIR + global HDFS_LAYERS_DIR + global HDFS_UNREF_DIR + global DEAD_PERMS + + image_tag_to_hash = args.image_tag_to_hash + working_dir = get_working_dir(args.working_dir) + + local_image_tag_to_hash = os.path.join(working_dir, os.path.basename(image_tag_to_hash)) + + try: + hash_to_tags, tag_to_hash, image_tag_to_hash_hash = populate_tag_dicts(image_tag_to_hash, + local_image_tag_to_hash) + logging.debug("hash_to_tags: %s\n", hash_to_tags) + logging.debug("tag_to_hash: %s\n", tag_to_hash) + + known_images = get_all_known_images() + tagged_images = [image for image in known_images if image.manifest_stat.name in hash_to_tags.keys()] + untagged_images = get_untagged_images(known_images, tagged_images) + stale_images = get_stale_images(untagged_images) + dead_images = get_dead_images(untagged_images) + + cleanup_handle_tagged_images(tagged_images) + cleanup_handle_untagged_images(untagged_images) + cleanup_handle_stale_images(stale_images) + cleanup_handle_dead_images(dead_images, known_images) + + finally: + if working_dir: + if os.path.isdir(working_dir): + shutil.rmtree(working_dir) + + +class Filestat: + def __init__(self, perms, mod_time, name): + self.perms = perms + self.mod_time = mod_time + self.name = name + + def __repr__(self): + return "Perms: {}, Mod time: {}, Name: {}\n".format(self.perms, self.mod_time, self.name) + + def __str__(self): + return 
"Perms: {}, Mod time: {}, Name: {}\n".format(self.perms, self.mod_time, self.name) + + +class Image: + def __init__(self, manifest_stat, layers, config, unref_file_stat): + self.manifest_stat = manifest_stat + self.layers = layers + self.config = config + self.unref_file_stat = unref_file_stat + + def __repr__(self): + return "Manifest: {}, Layers: {}, Config: {}, Unreferenced File: {}\n".format(self.manifest_stat, self.layers, + self.config, self.unref_file_stat) + + def __str__(self): + return "Manifest: {}, Layers: {}, Config: {}, Unreferenced File: {}\n".format(self.manifest_stat, self.layers, + self.config, self.unref_file_stat) + + +def get_all_known_images(): + global HDFS_MANIFEST_DIR + global HDFS_UNREF_DIR + + known_manifest_paths, err, returncode = hdfs_stat(HDFS_MANIFEST_DIR + "/*", "%a %Y %n", False, False, False) + known_manifests = [image for image in known_manifest_paths.split("\n") if image is not "" or None] + + unref_manifest_paths, err, returncode = hdfs_stat(HDFS_UNREF_DIR + "/*", "%a %Y %n", False, False, False) + logging.debug("unref_manifest_paths:\n%s", unref_manifest_paths) + unref_manifests = [] + if unref_manifest_paths: + unref_manifests = [image for image in unref_manifest_paths.split("\n") if image is not "" or None] + logging.debug("unref_manifests:\n%s", unref_manifests) + + unref_manifests_stats_dict = {} + if unref_manifests: + for unref_manifest in unref_manifests: + unref_manifest_split = unref_manifest.split() + unref_manifest_perms = unref_manifest_split[0] + unref_manifest_mod_time = long(unref_manifest_split[1]) + unref_manifest_name = unref_manifest_split[2] + unref_manifest_stat = Filestat(unref_manifest_perms, unref_manifest_mod_time, unref_manifest_name) + unref_manifests_stats_dict[unref_manifest_name] = unref_manifest_stat + + logging.debug("unref_manifests_stats_dict:\n%s", unref_manifests_stats_dict) + + known_manifests_names = [known_manifest.split()[2] for known_manifest in known_manifests] + layers_and_configs = get_all_layers_and_configs(known_manifests_names) + + known_images = [] + for manifest in known_manifests: + manifest_split = manifest.split() + manifest_perms = manifest_split[0] + manifest_mod_time = long(manifest_split[1]) + manifest_name = manifest_split[2] + manifest_stat = Filestat(manifest_perms, manifest_mod_time, manifest_name) + + unref_image_stat = unref_manifests_stats_dict.get(manifest_name, None) + + layers = layers_and_configs[manifest_name][0] + config = layers_and_configs[manifest_name][1] + known_image = Image(manifest_stat, layers, config, unref_image_stat) + known_images.append(known_image) + + return known_images + + +def get_known_image_by_hash(image_hash, known_images): + for image in known_images: + if image_hash == image.manifest_stat.name: + return image + logging.debug("Couldn't find known image by hash:\n%s", image_hash) + return None + + +def get_all_layers_and_configs(manifest_names): + global HDFS_MANIFEST_DIR + + manifests_tuples = get_hdfs_manifests_from_paths( + [HDFS_MANIFEST_DIR + "/" + manifest_name for manifest_name in manifest_names]) + layers_and_configs = {} + + for manifest_tuple in manifests_tuples: + manifest = manifest_tuple[0] + manifest_hash = manifest_tuple[1] + layers = [] + layers.extend(get_layer_hashes_from_manifest(manifest, False)) + config = get_config_hash_from_manifest(manifest) + layers_and_configs[manifest_hash] = (layers, config) + + logging.debug("layers for %s:\n%s", manifest_hash, layers) + logging.debug("config for %s:\n%s", manifest_hash, config) + + return 
+
+
+def get_untagged_images(known_images, tagged_images):
+    untagged_images = []
+    for image in known_images:
+        if is_image_untagged(image, tagged_images):
+            untagged_images.append(image)
+
+    logging.debug("known_images:\n%s", known_images)
+    logging.debug("tagged_images:\n%s", tagged_images)
+    logging.debug("untagged_images:\n%s", untagged_images)
+    return untagged_images
+
+
+def get_stale_images(untagged_images):
+    stale_images = [image for image in untagged_images if is_image_stale(image)]
+    logging.debug("stale_images:\n%s", stale_images)
+    return stale_images
+
+
+def get_dead_images(untagged_images):
+    dead_images = [image for image in untagged_images if is_image_dead(image)]
+    logging.debug("dead_images:\n%s", dead_images)
+    return dead_images
+
+
+def is_image_untagged(image, tagged_images):
+    for tagged_image in tagged_images:
+        if tagged_image.manifest_stat.name == image.manifest_stat.name:
+            return False
+    return True
+
+
+def is_image_stale(image):
+    return does_image_have_unref_file(image) and not does_image_have_dead_perms(image)
+
+
+def is_image_dead(image):
+    return does_image_have_unref_file(image) and does_image_have_dead_perms(image)
+
+
+def does_image_have_unref_file(image):
+    return image.unref_file_stat is not None
+
+
+def does_image_have_dead_perms(image):
+    global DEAD_PERMS
+    return image.manifest_stat.perms == DEAD_PERMS
+
+
+def is_mod_time_old(mod_time):
+    global UNTAGGED_TRANSITION_SEC
+
+    cutoff_time = int(time.time() * 1000) - UNTAGGED_TRANSITION_SEC * 1000
+    logging.debug("Mod time: %d, Cutoff time: %d", mod_time, cutoff_time)
+    return mod_time < cutoff_time
+
+
+def cleanup_handle_tagged_images(tagged_images):
+    # Remove unreferenced files if they exist
+    if not tagged_images:
+        return
+
+    unref_remove_list = []
+    for image in tagged_images:
+        if does_image_have_unref_file(image):
+            unref_remove_list.append(image)
+
+    remove_unref_files(unref_remove_list)
+
+
+def cleanup_handle_untagged_images(untagged_images):
+    # Create unreferenced file
+    if not untagged_images:
+        return
+
+    touch_list = []
+    for image in untagged_images:
+        if not does_image_have_unref_file(image):
+            touch_list.append(image)
+
+    touch_unref_files(touch_list)
+
+
+def cleanup_handle_stale_images(stale_images):
+    # Set blob permissions to 400 for old stale images
+    if not stale_images:
+        return
+
+    make_unreadable_list = []
+    for image in stale_images:
+        if is_mod_time_old(image.unref_file_stat.mod_time):
+            make_unreadable_list.append(image)
+
+    make_manifests_unreadable(make_unreadable_list)
+    touch_unref_files(make_unreadable_list)
+
+
+def cleanup_handle_dead_images(dead_images, known_images):
+    # Remove old dead images
+    if not dead_images:
+        return
+
+    images_to_remove = []
+    for image in dead_images:
+        if is_mod_time_old(image.unref_file_stat.mod_time):
+            images_to_remove.append(image)
+
+    remove_dead_images(images_to_remove, known_images)
+
+
+def make_manifests_unreadable(images):
+    global HDFS_MANIFEST_DIR
+    global DEAD_PERMS
+
+    if not images:
+        return
+
+    manifest_file_paths = [HDFS_MANIFEST_DIR + "/" + image.unref_file_stat.name for image in images]
+    logging.debug("Chmod %s manifest file:\n%s", DEAD_PERMS, manifest_file_paths)
+    hdfs_chmod(DEAD_PERMS, manifest_file_paths)
+
+
+def touch_unref_files(images):
+    global HDFS_UNREF_DIR
+
+    if not images:
+        return
+
+    unref_file_paths = [HDFS_UNREF_DIR + "/" + image.manifest_stat.name for image in images]
+    logging.debug("Touching unref file:\n%s", unref_file_paths)
+    hdfs_touchz(unref_file_paths)
+
+
+def 
remove_unref_files(images): + global HDFS_UNREF_DIR + + if not images: + return + + unref_file_paths = [HDFS_UNREF_DIR + "/" + image.manifest_stat.name for image in images] + logging.debug("Removing unref files:\n%s", unref_file_paths) + hdfs_rm(unref_file_paths) + + +def remove_dead_images(images_to_remove, known_images): + if not images_to_remove: + return + + logging.debug("Removing dead images:\n%s", images_to_remove) + delete_list = get_delete_list_from_images_to_remove(images_to_remove, known_images) + if delete_list: + hdfs_rm(delete_list) + + +def create_parsers(): + parser = argparse.ArgumentParser() + add_parser_default_arguments(parser) + + subparsers = parser.add_subparsers(help='sub help', dest='sub_command') + + parse_pull_build_push_update = subparsers.add_parser('pull-build-push-update', + help='Pull an image, build its squashfs' + + ' layers, push it to hdfs, and' + + ' atomically update the' + + ' image-tag-to-hash file') + parse_pull_build_push_update.set_defaults(func=pull_build_push_update) + add_parser_default_arguments(parse_pull_build_push_update) + parse_pull_build_push_update.add_argument("images_and_tags", nargs="+", + help="Image and tag argument (can specify multiple)") + + parse_pull_build = subparsers.add_parser('pull-build', + help='Pull an image and build its squashfs layers') + parse_pull_build.set_defaults(func=pull_build) + add_parser_default_arguments(parse_pull_build) + parse_pull_build.add_argument("images_and_tags", nargs="+", + help="Image and tag argument (can specify multiple)") + + parse_push_update = subparsers.add_parser('push-update', + help='Push the squashfs layers to hdfs and update' + + ' the image-tag-to-hash file') + parse_push_update.set_defaults(func=push_update) + add_parser_default_arguments(parse_push_update) + parse_push_update.add_argument("images_and_tags", nargs="+", + help="Image and tag argument (can specify multiple)") + + parse_remove_image = subparsers.add_parser('remove-image', + help='Remove an image (manifest, config, layers)' + + ' from hdfs based on its tag or manifest hash') + parse_remove_image.set_defaults(func=remove_image) + add_parser_default_arguments(parse_remove_image) + parse_remove_image.add_argument("images_or_tags", nargs="+", + help="Image or tag argument (can specify multiple)") + + parse_remove_tag = subparsers.add_parser('remove-tag', + help='Remove an image to tag mapping in the' + + ' image-tag-to-hash file') + parse_remove_tag.set_defaults(func=add_remove_tag) + add_parser_default_arguments(parse_remove_tag) + parse_remove_tag.add_argument("images_and_tags", nargs="+", + help="Image and tag argument (can specify multiple)") + + parse_add_tag = subparsers.add_parser('add-tag', + help='Add an image to tag mapping in the' + + ' image-tag-to-hash file') + parse_add_tag.set_defaults(func=add_remove_tag) + add_parser_default_arguments(parse_add_tag) + parse_add_tag.add_argument("images_and_tags", nargs="+", + help="Image and tag argument (can specify multiple)") + + parse_copy_update = subparsers.add_parser('copy-update', + help='Copy an image from hdfs in one cluster to' + + ' another and then update the' + + ' image-tag-to-hash file') + parse_copy_update.set_defaults(func=copy_update) + add_parser_default_arguments(parse_copy_update) + parse_copy_update.add_argument("src_root", + help="HDFS path for source root directory") + parse_copy_update.add_argument("dest_root", + help="HDFS path for destination root directory") + parse_copy_update.add_argument("images_and_tags", nargs="+", + help="Image and tag 
argument (can specify multiple)")
+
+    parse_query_tag = subparsers.add_parser('query-tag',
+                                            help='Get the manifest, config, and layers' +
+                                                 ' associated with a tag')
+    parse_query_tag.set_defaults(func=query_tag)
+    add_parser_default_arguments(parse_query_tag)
+    parse_query_tag.add_argument("tags", nargs="+",
+                                 help="Image or tag argument (can specify multiple)")
+
+    parse_list_tags = subparsers.add_parser('list-tags',
+                                            help='List all tags in image-tag-to-hash file')
+    parse_list_tags.set_defaults(func=list_tags)
+    add_parser_default_arguments(parse_list_tags)
+
+    parse_bootstrap_setup = subparsers.add_parser('bootstrap', help='Bootstrap setup of required HDFS directories')
+    parse_bootstrap_setup.set_defaults(func=bootstrap_setup)
+    add_parser_default_arguments(parse_bootstrap_setup)
+
+    parse_cleanup_untagged_images = subparsers.add_parser('cleanup',
+                                                          help='Cleanup untagged images in HDFS')
+    parse_cleanup_untagged_images.set_defaults(func=cleanup_untagged_images)
+    add_parser_default_arguments(parse_cleanup_untagged_images)
+
+    return parser
+
+
+def add_parser_default_arguments(parser):
+    parser.add_argument("--working-dir", type=str, dest='working_dir', default="dts-work-dir",
+                        help="Name of working directory")
+    parser.add_argument("--skopeo-format", type=str, dest='skopeo_format',
+                        default='dir', help="Output format for skopeo copy")
+    parser.add_argument("--pull-format", type=str, dest='pull_format',
+                        default='docker', help="Pull format for skopeo")
+    parser.add_argument("-l", "--log", type=str, dest='LOG_LEVEL',
+                        default="INFO", help="Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)")
+    parser.add_argument("--hdfs-root", type=str, dest='HDFS_ROOT',
+                        default='/runc-root', help="The root directory in HDFS for all of the " +
+                                                   "squashfs images")
+    parser.add_argument("--image-tag-to-hash", type=str,
+                        dest='image_tag_to_hash', default='image-tag-to-hash',
+                        help="image-tag-to-hash filename in hdfs")
+    parser.add_argument("-r", "--replication", type=int, dest='replication',
+                        default=3, help="Replication factor for all files uploaded to HDFS")
+    parser.add_argument("--hadoop-prefix", type=str, dest='hadoop_prefix',
+                        default=os.environ.get('HADOOP_PREFIX'),
+                        help="hadoop prefix value for environment")
+    parser.add_argument("-f", "--force", dest='force',
+                        action="http://github.com/store_true", default=False, help="Force overwrites in HDFS")
+    parser.add_argument("--check-magic-file", dest='check_magic_file',
+                        action="http://github.com/store_true", default=False, help="Check for a specific magic file " +
+                                                            "in the image before uploading")
+    parser.add_argument("--magic-file", type=str, dest='magic_file',
+                        default='etc/dockerfile-version', help="The magic file to check for " +
+                                                               "in the image")
+    parser.add_argument("--max-layers", type=int, dest='MAX_IMAGE_LAYERS',
+                        default=37, help="Maximum number of layers an image is allowed to have")
+    parser.add_argument("--max-size", type=int, dest='MAX_IMAGE_SIZE',
+                        default=10 * 1024 * 1024 * 1024, help="Maximum size an image is allowed to be")
+    parser.add_argument("--untagged-transition-sec", type=int, dest='UNTAGGED_TRANSITION_SEC',
+                        default=7 * 24 * 60 * 60, help="Time that untagged images will spend in each state " +
+                                                       "before moving to the next one")
+    parser.add_argument("--dead-perms", type=str, dest='DEAD_PERMS',
+                        default="400", help="Permissions to set for manifests that are untagged " +
+                                            "before they are removed")
+    parser.add_argument("-b", "--bootstrap", dest='bootstrap',
+                        action="http://github.com/store_true", default=False, help="Bootstrap setup" +
+                                                            " of required HDFS directories")
+    return parser
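+
+# Illustrative invocation (the script name and the image,tag argument format are assumptions,
+# not verified against split_image_and_tag):
+#   ./docker_to_squash.py --hdfs-root /runc-root pull-build-push-update centos:latest,centos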
action="http://github.com/store_true", default=False, help="Bootstrap setup" + + " of required HDFS directories") + return parser + + +def check_dependencies(): + global HADOOP_BIN_DIR + + try: + command = [HADOOP_BIN_DIR + "/hadoop", "-h"] + shell_command(command, False, False, True) + except Exception as ex: + logging.error("Could not find hadoop. Make sure HADOOP_PREFIX " + + "is set correctly either in your environment or on the command line " + + "via --hadoop-prefix" + + ", exception is " + str(ex)) + return 1 + + try: + command = ["skopeo", "-v"] + shell_command(command, False, False, True) + except Exception as _: + logging.error("Could not find skopeo. Make sure it is installed and present on the PATH") + return 1 + + try: + command = ["/usr/sbin/mksquashfs", "-version"] + shell_command(command, False, False, True) + except Exception as _: + logging.error("Could not find /usr/sbin/mksquashfs. Make sure squashfs-tools is installed " + + "and /usr/sbin/mksquashfs is present on the the PATH") + return 1 + + try: + command = ["tar", "--version"] + shell_command(command, False, False, True) + except Exception as _: + logging.error("Could not find tar. Make sure it is installed and present on the PATH") + return 1 + + try: + command = ["setfattr", "--version"] + shell_command(command, False, False, True) + except: + logging.error("Could not find setfattr . Make sure it is installed and present on the PATH") + return 1 + + return 0 + + +def main(): + global LOG_LEVEL + global HADOOP_PREFIX + global HADOOP_BIN_DIR + global HDFS_ROOT + global HDFS_MANIFEST_DIR + global HDFS_CONFIG_DIR + global HDFS_LAYERS_DIR + global HDFS_UNREF_DIR + global MAX_IMAGE_LAYERS + global MAX_IMAGE_SIZE + global UNTAGGED_TRANSITION_SEC + global ARG_MAX + global DEAD_PERMS + + if os.geteuid() != 0: + logging.error("Script must be run as root") + return + + parser = create_parsers() + args, extra = parser.parse_known_args() + + if extra: + raise Exception(f"Extra unknown arguments given: {extra}") + + ARG_MAX = os.sysconf("SC_ARG_MAX") + HDFS_ROOT = args.HDFS_ROOT + HDFS_MANIFEST_DIR = HDFS_ROOT + "/manifests" + HDFS_CONFIG_DIR = HDFS_ROOT + "/config" + HDFS_LAYERS_DIR = HDFS_ROOT + "/layers" + HDFS_UNREF_DIR = HDFS_ROOT + "/unreferenced" + MAX_IMAGE_LAYERS = args.MAX_IMAGE_LAYERS + MAX_IMAGE_SIZE = args.MAX_IMAGE_SIZE + UNTAGGED_TRANSITION_SEC = args.UNTAGGED_TRANSITION_SEC + DEAD_PERMS = args.DEAD_PERMS + LOG_LEVEL = args.LOG_LEVEL.upper() + image_tag_to_hash = args.image_tag_to_hash + + numeric_level = getattr(logging, LOG_LEVEL, None) + if not isinstance(numeric_level, int): + logging.error("Invalid log level: %s", LOG_LEVEL) + return + logging.basicConfig(format="%(levelname)s: %(message)s", level=numeric_level) + + if args.hadoop_prefix is None: + logging.error("Hadoop prefix is not set. 
You may set it either " + + "in your environment or via --hadoop-prefix") + return + + HADOOP_PREFIX = args.hadoop_prefix + HADOOP_BIN_DIR = HADOOP_PREFIX + "/bin" + + if check_dependencies(): + return + + if "/" in image_tag_to_hash: + logging.error("image-tag-to-hash cannot contain a /") + return + + logging.debug("args: %s", str(args)) + logging.debug("extra: %s", str(extra)) + logging.debug("image-tag-to-hash: %s", image_tag_to_hash) + logging.debug("LOG_LEVEL: %s", LOG_LEVEL) + logging.debug("HADOOP_BIN_DIR: %s", str(HADOOP_BIN_DIR)) + + args.func(args) + + +if __name__ == "__main__": + main() diff --git a/bin/flight.bash b/bin/flight.bash new file mode 100755 index 00000000000..d38dd2e1d44 --- /dev/null +++ b/bin/flight.bash @@ -0,0 +1,179 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +JDKPATH=$JAVA_HOME +BINPATH="/usr/bin/" +USER=`whoami` + +#SETTINGS=/Library/Java/JavaVirtualMachines/jdk1.8.0_51.jdk/Contents/Home/jre/lib/jfr/profile.jfc +SETTINGS=profile + +platform='unknown' +unamestr=`uname` +if [[ "$unamestr" == 'Linux' ]]; then + platform='linux' +elif [[ "$unamestr" == 'Darwin' ]]; then + platform='darwin' +elif [[ "$unamestr" == 'FreeBSD' ]]; then + platform='freebsd' +fi + +if [[ $platform == 'linux' ]]; then + if [ -z "$JDKPATH" ]; then + BINPATH="/usr/bin/" + else + BINPATH="$JDKPATH/bin/" + fi +elif [[ $platform == 'darwin' ]]; then + BINPATH="/usr/bin/" +fi + +#check if java is available at $BINPATH; if not, fall back to use java commands directly. +JAVAPATH="${BINPATH}java" +if [ -f "$JAVAPATH" ]; then + echo "$JAVAPATH found. Will use java utils from $BINPATH" +else + echo "$JAVAPATH or JAVA_HOME not found. 
Will use java utils directly";
+    BINPATH=""
+fi
+
+export RECORDING_NAME_PREFIX="storm-recording-"
+
+function start_record {
+    # start_record pid
+    timestamps=`get_recording_timestamps $1`
+    if [ -z "${timestamps}" ]; then
+        # append timestamp to ${RECORDING_NAME_PREFIX} to form a recording name
+        ${BINPATH}jcmd $1 JFR.start name=${RECORDING_NAME_PREFIX}${NOW} settings=${SETTINGS}
+    else
+        echo "Another recording session is already in progress; skipping"
+    fi
+}
+
+function dump_record {
+    timestamps=`get_recording_timestamps $1`
+    if [ -z "${timestamps}" ]; then
+        echo "No existing recording session to dump"
+    else
+        for start_timestamp in ${timestamps}; do
+            FILENAME=recording-$1-${start_timestamp}-${NOW}.jfr
+            ${BINPATH}jcmd $1 JFR.dump name=${RECORDING_NAME_PREFIX}${start_timestamp} filename="$2/${FILENAME}"
+        done
+    fi
+}
+
+function stop_record {
+    timestamps=`get_recording_timestamps $1`
+    if [ -z "${timestamps}" ]; then
+        echo "No existing recording session to stop"
+    else
+        for start_timestamp in ${timestamps}; do
+            FILENAME=recording-$1-${start_timestamp}-${NOW}.jfr
+            ${BINPATH}jcmd $1 JFR.dump name=${RECORDING_NAME_PREFIX}${start_timestamp} filename="$2/${FILENAME}"
+            ${BINPATH}jcmd $1 JFR.stop name=${RECORDING_NAME_PREFIX}${start_timestamp}
+        done
+    fi
+}
+
+# recording name is coded as ${RECORDING_NAME_PREFIX}${start_timestamp}, see start_record.
+# On different JFR versions (e.g. 5.4 vs 5.5), the output format is different: 5.4 has double quotes around the recording name, 5.5 doesn't have double quotes.
+function get_recording_timestamps {
+    ${BINPATH}jcmd $1 JFR.check | perl -n -e '/name=(")?$ENV{RECORDING_NAME_PREFIX}([0-9]+)(?(1)\1|)/ && print "$2 "'
+}
+
+function jstack_record {
+    FILENAME=jstack-$1-${NOW}.txt
+    ${BINPATH}jstack $1 > "$2/${FILENAME}" 2>&1
+}
+
+function jmap_record {
+    FILENAME=recording-$1-${NOW}.bin
+    ${BINPATH}jmap -dump:format=b,file="$2/${FILENAME}" $1
+    /bin/chmod g+r "$2/${FILENAME}"
+}
+
+function usage_and_quit {
+    echo "Usage: $0 pid start [profile_settings]"
+    echo "       $0 pid dump target_dir"
+    echo "       $0 pid stop target_dir"
+    echo "       $0 pid jstack target_dir"
+    echo "       $0 pid jmap target_dir"
+    echo "       $0 pid kill"
+    exit -1
+}
+
+# Before using this script: make sure FlightRecorder is enabled
+
+if [ "$#" -le 1 ]; then
+    echo "Wrong number of arguments."
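+    # a pid and a command are required at minimum; anything less prints usage and exits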
+ usage_and_quit + +fi +# call this script with the process pid, example: "./flight PID start" or "./flight PID stop" +PID="$1" +CMD="$2" + +if /bin/ps -p $PID > /dev/null +then + if [[ $platform == 'linux' ]]; then + USER=`/bin/ps -ouser --noheader $PID` + elif [[ $platform == 'darwin' ]]; then + USER=`/bin/ps -ouser $PID` + fi +else + echo "No such pid running: $PID" + usage_and_quit +fi + +if [ "$CMD" != "start" ] && [ "$CMD" != "kill" ]; then + if [[ $3 ]] && [[ -d $3 ]] + then + TARGETDIR="$3" + mkdir -p ${TARGETDIR} + else + echo "Missing target directory" + usage_and_quit + fi +fi + +NOW=`date +'%Y%m%d%H%M%S'` +if [ "$CMD" = "" ]; then + usage_and_quit +elif [ "$CMD" = "kill" ]; then + echo "Killing process with pid: $PID" + kill -9 ${PID} +elif [ "$CMD" = "start" ]; then + if [[ $3 ]] + then + SETTINGS=$3 + fi + start_record ${PID} +elif [ "$CMD" = "stop" ]; then + echo "Capturing dump before stopping in dir $TARGETDIR" + stop_record ${PID} ${TARGETDIR} +elif [ "$CMD" = "jstack" ]; then + echo "Capturing dump in dir $TARGETDIR" + jstack_record ${PID} ${TARGETDIR} +elif [ "$CMD" = "jmap" ]; then + echo "Capturing dump in dir $TARGETDIR" + jmap_record ${PID} ${TARGETDIR} +elif [ "$CMD" = "dump" ]; then + echo "Capturing dump in dir $TARGETDIR" + dump_record ${PID} ${TARGETDIR} +else + usage_and_quit +fi diff --git a/bin/storm b/bin/storm index c9885a29e28..65e3bda45eb 100755 --- a/bin/storm +++ b/bin/storm @@ -1,5 +1,5 @@ -#!/usr/bin/python - +#!/usr/bin/env bash +# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -8,465 +8,67 @@ # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+# -import os -import sys -import random -import subprocess as sub -import re -import shlex -import urllib -import ConfigParser - -def identity(x): - return x - -def cygpath(x): - command = ["cygpath", "-wp", x] - p = sub.Popen(command,stdout=sub.PIPE) - output, errors = p.communicate() - lines = output.split("\n") - return lines[0] - -def init_storm_env(): - global CLUSTER_CONF_DIR - ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini') - if not os.path.isfile(ini_file): - return - config = ConfigParser.ConfigParser() - config.optionxform = str - config.read(ini_file) - options = config.options('environment') - for option in options: - value = config.get('environment', option) - os.environ[option] = value - -normclasspath = cygpath if sys.platform == 'cygwin' else identity -STORM_DIR = "/".join(os.path.realpath( __file__ ).split("/")[:-2]) -USER_CONF_DIR = os.path.expanduser("~/.storm") -CLUSTER_CONF_DIR = STORM_DIR + "/conf" -if (not os.path.isfile(USER_CONF_DIR + "/storm.yaml")): - USER_CONF_DIR = CLUSTER_CONF_DIR - -init_storm_env() - -CONFIG_OPTS = [] -CONFFILE = "" -JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', '')) -JAVA_HOME = os.getenv('JAVA_HOME', None) -JAVA_CMD = 'java' if not JAVA_HOME else os.path.join(JAVA_HOME, 'bin', 'java') - -def get_config_opts(): - global CONFIG_OPTS - return "-Dstorm.options=" + ','.join(map(urllib.quote_plus,CONFIG_OPTS)) - -if not os.path.exists(STORM_DIR + "/RELEASE"): - print "******************************************" - print "The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code." - print "\nYou can download a Storm release at http://storm-project.net/downloads.html" - print "******************************************" - sys.exit(1) - -def get_jars_full(adir): - files = os.listdir(adir) - ret = [] - for f in files: - if f.endswith(".jar"): - ret.append(adir + "/" + f) - return ret - -def get_classpath(extrajars): - ret = get_jars_full(STORM_DIR) - ret.extend(get_jars_full(STORM_DIR + "/lib")) - ret.extend(extrajars) - return normclasspath(":".join(ret)) - -def confvalue(name, extrapaths): - global CONFFILE - command = [ - JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE, - "-cp", get_classpath(extrapaths), "backtype.storm.command.config_value", name - ] - p = sub.Popen(command, stdout=sub.PIPE) - output, errors = p.communicate() - lines = output.split("\n") - for line in lines: - tokens = line.split(" ") - if tokens[0] == "VALUE:": - return " ".join(tokens[1:]) - return "" - -def print_localconfvalue(name): - """Syntax: [storm localconfvalue conf-name] - - Prints out the value for conf-name in the local Storm configs. - The local Storm configs are the ones in ~/.storm/storm.yaml merged - in with the configs in defaults.yaml. - """ - print name + ": " + confvalue(name, [USER_CONF_DIR]) - -def print_remoteconfvalue(name): - """Syntax: [storm remoteconfvalue conf-name] - - Prints out the value for conf-name in the cluster's Storm configs. - The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml - merged in with the configs in defaults.yaml. - - This command must be run on a cluster machine. - """ - print name + ": " + confvalue(name, [CLUSTER_CONF_DIR]) - -def parse_args(string): - r"""Takes a string of whitespace-separated tokens and parses it into a list. - Whitespace inside tokens may be quoted with single quotes, double quotes or - backslash (similar to command-line arguments in bash). 
- - >>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''') - ['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n'] - """ - re_split = re.compile(r'''((?: - [^\s"'\\] | - "(?: [^"\\] | \\.)*" | - '(?: [^'\\] | \\.)*' | - \\. - )+)''', re.VERBOSE) - args = re_split.split(string)[1::2] - args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args] - args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args] - return [re.compile(r'\\(.)').sub('\\1', x) for x in args] - -def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False): - global CONFFILE - all_args = [ - JAVA_CMD, jvmtype, get_config_opts(), - "-Dstorm.home=" + STORM_DIR, - "-Djava.library.path=" + confvalue("java.library.path", extrajars), - "-Dstorm.conf.file=" + CONFFILE, - "-cp", get_classpath(extrajars), - ] + jvmopts + [klass] + list(args) - print "Running: " + " ".join(all_args) - if fork: - os.spawnvp(os.P_WAIT, JAVA_CMD, all_args) - else: - os.execvp(JAVA_CMD, all_args) # replaces the current process and - # never returns - -def jar(jarfile, klass, *args): - """Syntax: [storm jar topology-jar-path class ...] - - Runs the main method of class with the specified arguments. - The storm jars and configs in ~/.storm are put on the classpath. - The process is configured so that StormSubmitter - (http://storm.incubator.apache.org/apidocs/backtype/storm/StormSubmitter.html) - will upload the jar at topology-jar-path when the topology is submitted. - """ - exec_storm_class( - klass, - jvmtype="-client", - extrajars=[jarfile, USER_CONF_DIR, STORM_DIR + "/bin"], - args=args, - jvmopts=JAR_JVM_OPTS + ["-Dstorm.jar=" + jarfile]) - -def kill(*args): - """Syntax: [storm kill topology-name [-w wait-time-secs]] - - Kills the topology with the name topology-name. Storm will - first deactivate the topology's spouts for the duration of - the topology's message timeout to allow all messages currently - being processed to finish processing. Storm will then shutdown - the workers and clean up their state. You can override the length - of time Storm waits between deactivation and shutdown with the -w flag. - """ - exec_storm_class( - "backtype.storm.command.kill_topology", - args=args, - jvmtype="-client", - extrajars=[USER_CONF_DIR, STORM_DIR + "/bin"]) - -def activate(*args): - """Syntax: [storm activate topology-name] - - Activates the specified topology's spouts. - """ - exec_storm_class( - "backtype.storm.command.activate", - args=args, - jvmtype="-client", - extrajars=[USER_CONF_DIR, STORM_DIR + "/bin"]) - -def listtopos(*args): - """Syntax: [storm list] - - List the running topologies and their statuses. - """ - exec_storm_class( - "backtype.storm.command.list", - args=args, - jvmtype="-client", - extrajars=[USER_CONF_DIR, STORM_DIR + "/bin"]) - -def deactivate(*args): - """Syntax: [storm deactivate topology-name] - - Deactivates the specified topology's spouts. - """ - exec_storm_class( - "backtype.storm.command.deactivate", - args=args, - jvmtype="-client", - extrajars=[USER_CONF_DIR, STORM_DIR + "/bin"]) - -def rebalance(*args): - """Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]*] - - Sometimes you may wish to spread out where the workers for a topology - are running. For example, let's say you have a 10 node cluster running - 4 workers per node, and then let's say you add another 10 nodes to - the cluster. 
You may wish to have Storm spread out the workers for the - running topology so that each node runs 2 workers. One way to do this - is to kill the topology and resubmit it, but Storm provides a "rebalance" - command that provides an easier way to do this. - - Rebalance will first deactivate the topology for the duration of the - message timeout (overridable with the -w flag) and then redistribute - the workers evenly around the cluster. The topology will then return to - its previous state of activation (so a deactivated topology will still - be deactivated and an activated topology will go back to being activated). - - The rebalance command can also be used to change the parallelism of a running topology. - Use the -n and -e switches to change the number of workers or number of executors of a component - respectively. - """ - exec_storm_class( - "backtype.storm.command.rebalance", - args=args, - jvmtype="-client", - extrajars=[USER_CONF_DIR, STORM_DIR + "/bin"]) - -def shell(resourcesdir, command, *args): - tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar" - os.system("jar cf %s %s" % (tmpjarpath, resourcesdir)) - runnerargs = [tmpjarpath, command] - runnerargs.extend(args) - exec_storm_class( - "backtype.storm.command.shell_submission", - args=runnerargs, - jvmtype="-client", - extrajars=[USER_CONF_DIR], - fork=True) - os.system("rm " + tmpjarpath) - -def repl(): - """Syntax: [storm repl] - - Opens up a Clojure REPL with the storm jars and configuration - on the classpath. Useful for debugging. - """ - cppaths = [CLUSTER_CONF_DIR] - exec_storm_class("clojure.lang.Repl", jvmtype="-client", extrajars=cppaths) - -def nimbus(klass="backtype.storm.daemon.nimbus"): - """Syntax: [storm nimbus] - - Launches the nimbus daemon. This command should be run under - supervision with a tool like daemontools or monit. - - See Setting up a Storm cluster for more information. - (http://storm.incubator.apache.org/documentation/Setting-up-a-Storm-cluster) - """ - cppaths = [CLUSTER_CONF_DIR] - jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [ - "-Dlogfile.name=nimbus.log", - "-Dlogback.configurationFile=" + STORM_DIR + "/logback/cluster.xml", - ] - exec_storm_class( - klass, - jvmtype="-server", - extrajars=cppaths, - jvmopts=jvmopts) - -def supervisor(klass="backtype.storm.daemon.supervisor"): - """Syntax: [storm supervisor] - - Launches the supervisor daemon. This command should be run - under supervision with a tool like daemontools or monit. - - See Setting up a Storm cluster for more information. - (http://storm.incubator.apache.org/documentation/Setting-up-a-Storm-cluster) - """ - cppaths = [CLUSTER_CONF_DIR] - jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [ - "-Dlogfile.name=supervisor.log", - "-Dlogback.configurationFile=" + STORM_DIR + "/logback/cluster.xml", - ] - exec_storm_class( - klass, - jvmtype="-server", - extrajars=cppaths, - jvmopts=jvmopts) - -def ui(): - """Syntax: [storm ui] - - Launches the UI daemon. The UI provides a web interface for a Storm - cluster and shows detailed stats about running topologies. This command - should be run under supervision with a tool like daemontools or monit. - - See Setting up a Storm cluster for more information. 
- (http://storm.incubator.apache.org/documentation/Setting-up-a-Storm-cluster) - """ - cppaths = [CLUSTER_CONF_DIR] - jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [ - "-Dlogfile.name=ui.log", - "-Dlogback.configurationFile=" + STORM_DIR + "/logback/cluster.xml", - ] - exec_storm_class( - "backtype.storm.ui.core", - jvmtype="-server", - jvmopts=jvmopts, - extrajars=[STORM_DIR, CLUSTER_CONF_DIR]) - -def logviewer(): - """Syntax: [storm logviewer] - - Launches the log viewer daemon. It provides a web interface for viewing - storm log files. This command should be run under supervision with a - tool like daemontools or monit. - - See Setting up a Storm cluster for more information. - (http://storm.incubator.apache.org/documentation/Setting-up-a-Storm-cluster) - """ - cppaths = [CLUSTER_CONF_DIR] - jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [ - "-Dlogfile.name=logviewer.log", - "-Dlogback.configurationFile=" + STORM_DIR + "/logback/cluster.xml", - ] - exec_storm_class( - "backtype.storm.daemon.logviewer", - jvmtype="-server", - jvmopts=jvmopts, - extrajars=[STORM_DIR, CLUSTER_CONF_DIR]) - -def drpc(): - """Syntax: [storm drpc] - - Launches a DRPC daemon. This command should be run under supervision - with a tool like daemontools or monit. - - See Distributed RPC for more information. - (http://storm.incubator.apache.org/documentation/Distributed-RPC) - """ - cppaths = [CLUSTER_CONF_DIR] - jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [ - "-Dlogfile.name=drpc.log", - "-Dlogback.configurationFile=" + STORM_DIR + "/logback/cluster.xml" - ] - exec_storm_class( - "backtype.storm.daemon.drpc", - jvmtype="-server", - jvmopts=jvmopts, - extrajars=[CLUSTER_CONF_DIR]) - -def dev_zookeeper(): - """Syntax: [storm dev-zookeeper] - - Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and - "storm.zookeeper.port" as its port. This is only intended for development/testing, the - Zookeeper instance launched is not configured to be used in production. - """ - cppaths = [CLUSTER_CONF_DIR] - exec_storm_class( - "backtype.storm.command.dev_zookeeper", - jvmtype="-server", - extrajars=[CLUSTER_CONF_DIR]) - -def version(): - """Syntax: [storm version] - - Prints the version number of this Storm release. - """ - releasefile = STORM_DIR + "/RELEASE" - if os.path.exists(releasefile): - print open(releasefile).readline().strip() - else: - print "Unknown" - -def print_classpath(): - """Syntax: [storm classpath] - - Prints the classpath used by the storm client when running commands. - """ - print get_classpath([]) - -def print_commands(): - """Print all client commands and link to documentation""" - print "Commands:\n\t", "\n\t".join(sorted(COMMANDS.keys())) - print "\nHelp:", "\n\thelp", "\n\thelp " - print "\nDocumentation for the storm client can be found at http://storm.incubator.apache.org/documentation/Command-line-client.html\n" - print "Configs can be overridden using one or more -c flags, e.g. 
\"storm list -c nimbus.host=nimbus.mycompany.com\"\n" - -def print_usage(command=None): - """Print one help message or list of available commands""" - if command != None: - if COMMANDS.has_key(command): - print (COMMANDS[command].__doc__ or - "No documentation provided for <%s>" % command) - else: - print "<%s> is not a valid command" % command - else: - print_commands() - -def unknown_command(*args): - print "Unknown command: [storm %s]" % ' '.join(sys.argv[1:]) - print_usage() - -COMMANDS = {"jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer, - "drpc": drpc, "supervisor": supervisor, "localconfvalue": print_localconfvalue, - "remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, - "activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage, - "list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version} - -def parse_config(config_list): - global CONFIG_OPTS - if len(config_list) > 0: - for config in config_list: - CONFIG_OPTS.append(config) - -def parse_config_opts(args): - curr = args[:] - curr.reverse() - config_list = [] - args_list = [] - - while len(curr) > 0: - token = curr.pop() - if token == "-c": - config_list.append(curr.pop()) - elif token == "--config": - global CONFFILE - CONFFILE = curr.pop() - else: - args_list.append(token) - - return config_list, args_list - -def main(): - if len(sys.argv) <= 1: - print_usage() - sys.exit(-1) - global CONFIG_OPTS - config_list, args = parse_config_opts(sys.argv[1:]) - parse_config(config_list) - COMMAND = args[0] - ARGS = args[1:] - (COMMANDS.get(COMMAND, unknown_command))(*ARGS) - -if __name__ == "__main__": - main() +# STORM-2486: Prevent `cd` from printing the target directory. +unset CDPATH + +# Resolve links - $0 may be a softlink +PRG="${0}" + +while [ -h "${PRG}" ]; do + ls=`ls -ld "${PRG}"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "${PRG}"`/"$link" + fi +done + +# check for version +if [ -z $PYTHON ]; then + PYTHON="/usr/bin/env python3" +fi +majversion=`$PYTHON -V 2>&1 | awk '{print $2}' | cut -d'.' -f1` +minversion=`$PYTHON -V 2>&1 | awk '{print $2}' | cut -d'.' -f2` +numversion=$(( 10 * $majversion + $minversion)) +if (( $numversion < 30 )); then + echo "Need Python version > 3.0" + exit 1 +fi + +STORM_BIN_DIR=`dirname ${PRG}` +export STORM_BASE_DIR=`cd ${STORM_BIN_DIR}/..;pwd` + +#check to see if the conf dir or file is given as an optional argument +if [ $# -gt 1 ]; then + if [ "--config" = "$1" ]; then + conf_file=$2 + if [ -d "$conf_file" ]; then + conf_file=$conf_file/storm.yaml + fi + if [ ! -f "$conf_file" ]; then + echo "Error: Cannot find configuration file: $conf_file" + exit 1 + fi + STORM_CONF_FILE=$conf_file + STORM_CONF_DIR=`dirname $conf_file` + fi +fi + +export STORM_CONF_DIR="${STORM_CONF_DIR:-$STORM_BASE_DIR/conf}" +export STORM_CONF_FILE="${STORM_CONF_FILE:-$STORM_BASE_DIR/conf/storm.yaml}" + +if [ -f "${STORM_CONF_DIR}/storm-env.sh" ]; then + . "${STORM_CONF_DIR}/storm-env.sh" +fi + +exec "${STORM_BIN_DIR}/storm.py" "$@" diff --git a/bin/storm-config.cmd b/bin/storm-config.cmd deleted file mode 100644 index 9a11c34b798..00000000000 --- a/bin/storm-config.cmd +++ /dev/null @@ -1,114 +0,0 @@ -@echo off - -@rem Licensed to the Apache Software Foundation (ASF) under one -@rem or more contributor license agreements. 
See the NOTICE file -@rem distributed with this work for additional information -@rem regarding copyright ownership. The ASF licenses this file -@rem to you under the Apache License, Version 2.0 (the -@rem "License"); you may not use this file except in compliance -@rem with the License. You may obtain a copy of the License at -@rem -@rem http://www.apache.org/licenses/LICENSE-2.0 -@rem -@rem Unless required by applicable law or agreed to in writing, software -@rem distributed under the License is distributed on an "AS IS" BASIS, -@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -@rem See the License for the specific language governing permissions and -@rem limitations under the License. - - -set STORM_HOME=%~dp0 -for %%i in (%STORM_HOME%.) do ( - set STORM_HOME=%%~dpi -) -if "%STORM_HOME:~-1%" == "\" ( - set STORM_HOME=%STORM_HOME:~0,-1% -) - -if not exist %STORM_HOME%\lib\storm*.jar ( - @echo +================================================================+ - @echo ^| Error: STORM_HOME is not set correctly ^| - @echo +----------------------------------------------------------------+ - @echo ^| Please set your STORM_HOME variable to the absolute path of ^| - @echo ^| the directory that contains the storm distribution ^| - @echo +================================================================+ - exit /b 1 -) - -set STORM_BIN_DIR=%STORM_HOME%\bin - -if not defined STORM_CONF_DIR ( - set STORM_CONF_DIR=%STORM_HOME%\conf -) - -@rem -@rem setup java environment variables -@rem - -if not defined JAVA_HOME ( - set JAVA_HOME=c:\apps\java\openjdk7 -) - -if not exist %JAVA_HOME%\bin\java.exe ( - echo Error: JAVA_HOME is incorrectly set. - goto :eof -) - -set JAVA=%JAVA_HOME%\bin\java -set JAVA_HEAP_MAX=-Xmx1024m - -@rem -@rem check envvars which might override default args -@rem - -if defined STORM_HEAPSIZE ( - set JAVA_HEAP_MAX=-Xmx%STORM_HEAPSIZE%m -) - -@rem -@rem CLASSPATH initially contains %STORM_CONF_DIR% -@rem - -set CLASSPATH=%STORM_HOME%\*;%STORM_CONF_DIR% -set CLASSPATH=%CLASSPATH%;%JAVA_HOME%\lib\tools.jar - -@rem -@rem add libs to CLASSPATH -@rem - -set CLASSPATH=!CLASSPATH!;%STORM_HOME%\lib\* - -if not defined STORM_LOG_DIR ( - set STORM_LOG_DIR=%STORM_HOME%\logs -) - -if not defined STORM_LOGBACK_CONFIGURATION_FILE ( - set STORM_LOGBACK_CONFIGURATION_FILE=%STORM_HOME%\logback\cluster.xml -) -%JAVA% -client -Dstorm.options= -Dstorm.conf.file= -cp %CLASSPATH% backtype.storm.command.config_value java.library.path > temp.txt - -FOR /F "delims=" %%i in (temp.txt) do ( - FOR /F "tokens=1,* delims= " %%a in ("%%i") do ( - if %%a == VALUE: ( - set JAVA_LIBRARY_PATH=%%b - goto :storm_opts) - ) -) - - -:storm_opts - set STORM_OPTS=-Dstorm.options= -Dstorm.home=%STORM_HOME% -Djava.library.path=%JAVA_LIBRARY_PATH% - set STORM_OPTS=%STORM_OPTS% -Dlogback.configurationFile=%STORM_LOGBACK_CONFIGURATION_FILE% - set STORM_OPTS=%STORM_OPTS% -Dstorm.log.dir=%STORM_LOG_DIR% - del /F temp.txt - - -if not defined STORM_SERVER_OPTS ( - set STORM_SERVER_OPTS=-server -) - -if not defined STORM_CLIENT_OPTS ( - set STORM_CLIENT_OPTS=-client -) - -:eof diff --git a/bin/storm-kafka-monitor b/bin/storm-kafka-monitor new file mode 100755 index 00000000000..9bd11054cb5 --- /dev/null +++ b/bin/storm-kafka-monitor @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Resolve links - $0 may be a softlink +PRG="${0}" + +while [ -h "${PRG}" ]; do + ls=`ls -ld "${PRG}"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "${PRG}"`/"$link" + fi +done + +STORM_BIN_DIR=`dirname ${PRG}` +export STORM_BASE_DIR=`cd ${STORM_BIN_DIR}/..;pwd` +export STORM_CONF_DIR="${STORM_CONF_DIR:-$STORM_BASE_DIR/conf}" + +if [ -f "${STORM_CONF_DIR}/storm-env.sh" ]; then + . "${STORM_CONF_DIR}/storm-env.sh" +fi + +STORM_JAAS_CONF_PARAM="" +JAAS_FILE="${STORM_CONF_DIR}/storm_jaas.conf" +if [ -f $JAAS_FILE ]; then + STORM_JAAS_CONF_PARAM="-Djava.security.auth.login.config=${JAAS_FILE}" +fi +# Which java to use +if [ -z "$JAVA_HOME" ]; then + JAVA="java" +else + JAVA="$JAVA_HOME/bin/java" +fi +exec $JAVA $STORM_JAAS_CONF_PARAM $STORM_JAR_JVM_OPTS -cp "$STORM_BASE_DIR/lib-tools/storm-kafka-monitor/*" org.apache.storm.kafka.monitor.KafkaOffsetLagUtil "$@" diff --git a/bin/storm.cmd b/bin/storm.cmd deleted file mode 100644 index 17c01d6dfa5..00000000000 --- a/bin/storm.cmd +++ /dev/null @@ -1,246 +0,0 @@ -@echo off - -@rem Licensed to the Apache Software Foundation (ASF) under one -@rem or more contributor license agreements. See the NOTICE file -@rem distributed with this work for additional information -@rem regarding copyright ownership. The ASF licenses this file -@rem to you under the Apache License, Version 2.0 (the -@rem "License"); you may not use this file except in compliance -@rem with the License. You may obtain a copy of the License at -@rem -@rem http://www.apache.org/licenses/LICENSE-2.0 -@rem -@rem Unless required by applicable law or agreed to in writing, software -@rem distributed under the License is distributed on an "AS IS" BASIS, -@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -@rem See the License for the specific language governing permissions and -@rem limitations under the License. - -@rem The storm command script -@rem -@rem Environment Variables -@rem -@rem JAVA_HOME The java implementation to use. Overrides JAVA_HOME. -@rem -@rem STORM_CLASSPATH Extra Java CLASSPATH entries. -@rem -@rem STORM_HEAPSIZE The maximum amount of heap to use, in MB. -@rem Default is 1000. -@rem -@rem STORM_OPTS Extra Java runtime options. -@rem -@rem STORM_CONF_DIR Alternate conf dir. Default is ${STORM_HOME}/conf. -@rem -@rem STORM_ROOT_LOGGER The root appender. 
Default is INFO,console -@rem - -:main - setlocal enabledelayedexpansion - - call %~dp0storm-config.cmd - - set storm-command=%1 - if not defined storm-command ( - goto print_usage - ) - - call :make_command_arguments %* - - set shellcommands=classpath help version - for %%i in ( %shellcommands% ) do ( - if %storm-command% == %%i set shellcommand=true - ) - if defined shellcommand ( - call :%storm-command% %* - goto :eof - ) - - set corecommands=activate deactivate dev-zookeeper drpc kill list nimbus logviewer rebalance repl shell supervisor ui - for %%i in ( %corecommands% ) do ( - if %storm-command% == %%i set corecommand=true - ) - if defined corecommand ( - call :%storm-command% %storm-command-arguments% - ) else ( - set CLASS=%storm-command% - ) - - if %storm-command% == jar ( - set STORM_OPTS=%STORM_CLIENT_OPTS% %STORM_OPTS% -Dstorm.jar=%2 - set CLASSPATH=%CLASSPATH%;%2 - set CLASS=%3 - set storm-command-arguments=%4 %5 %6 %7 %8 %9 - ) - - if not defined STORM_LOG_FILE ( - set STORM_LOG_FILE=-Dlogfile.name=%storm-command%.log - ) - - if defined STORM_DEBUG ( - %JAVA% %JAVA_HEAP_MAX% %STORM_OPTS% %STORM_LOG_FILE% %CLASS% %storm-command-arguments% - ) - set path=%PATH%;%STORM_BIN_DIR%;%STORM_SBIN_DIR% - call start /b %JAVA% %JAVA_HEAP_MAX% %STORM_OPTS% %STORM_LOG_FILE% %CLASS% %storm-command-arguments% - goto :eof - - -:activate - set CLASS=backtype.storm.command.activate - set STORM_OPTS=%STORM_CLIENT_OPTS% %STORM_OPTS% - goto :eof - -:classpath - echo %CLASSPATH% - goto :eof - -:deactivate - set CLASS=backtype.storm.command.deactivate - set STORM_OPTS=%STORM_CLIENT_OPTS% %STORM_OPTS% - goto :eof - -:dev-zookeeper - set CLASS=backtype.storm.command.dev_zookeeper - set STORM_OPTS=%STORM_SERVER_OPTS% %STORM_OPTS% - goto :eof - -:drpc - set CLASS=backtype.storm.daemon.drpc - %JAVA% -client -Dstorm.options= -Dstorm.conf.file= -cp %CLASSPATH% backtype.storm.command.config_value drpc.childopts > temp.txt - FOR /F "delims=" %%i in (temp.txt) do ( - FOR /F "tokens=1,* delims= " %%a in ("%%i") do ( - if %%a == VALUE: ( - set CHILDOPTS=%%b - call :set_childopts) - ) - ) - goto :eof - -:help - call :print_usage - goto :eof - -:kill - set CLASS=backtype.storm.command.kill_topology - set STORM_OPTS=%STORM_CLIENT_OPTS% %STORM_OPTS% - goto :eof - -:list - set CLASS=backtype.storm.command.list - set STORM_OPTS=%STORM_CLIENT_OPTS% %STORM_OPTS% - goto :eof - -:logviewer - set CLASS=backtype.storm.daemon.logviewer - %JAVA% -client -Dstorm.options= -Dstorm.conf.file= -cp %CLASSPATH% backtype.storm.command.config_value logviewer.childopts > temp.txt - FOR /F "delims=" %%i in (temp.txt) do ( - FOR /F "tokens=1,* delims= " %%a in ("%%i") do ( - if %%a == VALUE: ( - set CHILDOPTS=%%b - call :set_childopts) - ) - ) - goto :eof - -:nimbus - set CLASS=backtype.storm.daemon.nimbus - %JAVA% -client -Dstorm.options= -Dstorm.conf.file= -cp %CLASSPATH% backtype.storm.command.config_value nimbus.childopts > temp.txt - FOR /F "delims=" %%i in (temp.txt) do ( - FOR /F "tokens=1,* delims= " %%a in ("%%i") do ( - if %%a == VALUE: ( - set CHILDOPTS=%%b - call :set_childopts) - ) - ) - goto :eof - -:rebalance - set CLASS=backtype.storm.command.rebalance - set STORM_OPTS=%STORM_CLIENT_OPTS% %STORM_OPTS% - goto :eof - -:repl - set CLASS=clojure.main - set STORM_OPTS=%STORM_CLIENT_OPTS% %STORM_OPTS% - goto :eof - -:shell - set CLASS=backtype.storm.command.shell_submission - set STORM_OPTS=%STORM_CLIENT_OPTS% %STORM_OPTS% - goto :eof - -:supervisor - set CLASS=backtype.storm.daemon.supervisor - %JAVA% -client -Dstorm.options= 
-Dstorm.conf.file= -cp %CLASSPATH% backtype.storm.command.config_value supervisor.childopts > temp.txt - FOR /F "delims=" %%i in (temp.txt) do ( - FOR /F "tokens=1,* delims= " %%a in ("%%i") do ( - if %%a == VALUE: ( - set CHILDOPTS=%%b - call :set_childopts) - ) - ) - goto :eof - -:ui - set CLASS=backtype.storm.ui.core - set CLASSPATH=%CLASSPATH%;%STORM_HOME% - %JAVA% -client -Dstorm.options= -Dstorm.conf.file= -cp %CLASSPATH% backtype.storm.command.config_value ui.childopts > temp.txt - FOR /F "delims=" %%i in (temp.txt) do ( - FOR /F "tokens=1,* delims= " %%a in ("%%i") do ( - if %%a == VALUE: ( - set CHILDOPTS=%%b - call :set_childopts) - ) - ) - goto :eof - -:version - type %STORM_HOME%\RELEASE - goto :eof - -:make_command_arguments - if "%2" == "" goto :eof - set _count=0 - set _shift=1 - for %%i in (%*) do ( - set /a _count=!_count!+1 - if !_count! GTR %_shift% ( - if not defined _arguments ( - set _arguments=%%i - ) else ( - set _arguments=!_arguments! %%i - ) - ) - ) - set storm-command-arguments=%_arguments% - goto :eof - -:set_childopts - set STORM_OPTS=%STORM_SERVER_OPTS% %STORM_OPTS% %CHILDOPTS% - del /F temp.txt - goto :eof - -:print_usage - @echo Usage: storm COMMAND - @echo where COMMAND is one of: - @echo activate activates the specified topology's spouts - @echo classpath prints the classpath used by the storm client when running commands - @echo deactivate deactivates the specified topology's spouts - @echo dev-zookeeper launches a fresh dev/test Zookeeper server - @echo drpc launches a DRPC daemon - @echo help - @echo jar ^ run a jar file - @echo kill kills the topology with the name topology-name - @echo list list the running topologies and their statuses - @echo nimbus launches the nimbus daemon - @echo rebalance redistribute or change the parallelism of a running topology - @echo repl opens up a Clojure REPL - @echo shell storm shell - @echo supervisor launches the supervisor daemon - @echo ui launches the UI daemon - @echo version print the version - @echo. - @echo or - @echo CLASSNAME run the class named CLASSNAME - @echo Most commands print help when invoked w/o parameters. - -endlocal diff --git a/bin/storm.ps1 b/bin/storm.ps1 new file mode 100644 index 00000000000..42fff9d1d49 --- /dev/null +++ b/bin/storm.ps1 @@ -0,0 +1,69 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
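+# Note: the version check below encodes major.minor as major*10 + minor
+# (3.0 -> 30, 3.1 -> 31) and rejects anything <= 30, i.e. it requires at
+# least Python 3.1. The encoding is lossy once the minor version reaches 10
+# (3.10 and 4.0 both map to 40), but any such version still passes the check.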
+# + +# Resolve links - $PSCommandPath may be a softlink +$PRG = $PSCommandPath; + +while((Get-Item $PRG).LinkType -eq "SymbolicLink") { + $PRG = (Get-Item $PRG).Target; +} + +# Check for Python version +$PythonVersion = (& python3 -V 2>&1).Split(" ")[1]; +$PythonMajor = [int]$PythonVersion.Split(".")[0]; +$PythonMinor = [int]$PythonVersion.Split(".")[1]; +$PythonNumVersion = $PythonMajor * 10 + $PythonMinor; +if($PythonNumVersion -le 30) { + Write-Output "Need Python version > 3.0"; + exit 1; +} + +$STORM_BIN_DIR = Split-Path -Parent $PRG; +$env:STORM_BASE_DIR = Split-Path -Parent $STORM_BIN_DIR; + +# Check to see if the conf dir or file is given as an optional argument +if($args.Length -ge 1) { + if("--config" -eq $args.get(0)) { + $ConfFile = $args.get(1); + if(-not (Test-Path $ConfFile)) { + Write-Output ("Error: Path {0} does not exist" -f $ConfFile); + exit 1; + } + if((Get-Item $ConfFile).PsIsContainer) { + $ConfFile=[io.path]::combine($ConfFile, "storm.yaml"); + } + if(-not (Test-Path $ConfFile)) { + Write-Output ("Error: Path {0} does not exist" -f $ConfFile); + exit 1; + } + $STORM_CONF_FILE = $ConfFile; + $STORM_CONF_DIR = Split-Path -Parent $STORM_CONF_FILE; + } +} + +$env:STORM_CONF_DIR = if($null -ne $STORM_CONF_DIR) { $STORM_CONF_DIR; } else { [io.path]::combine($env:STORM_BASE_DIR, "conf"); } +$env:STORM_CONF_FILE = if($null -ne $STORM_CONF_FILE) { $STORM_CONF_FILE; } else { [io.path]::combine($env:STORM_BASE_DIR, "conf", "storm.yaml"); } + +$StormEnvPath = [io.path]::combine($env:STORM_CONF_DIR, "storm-env.ps1"); +if(Test-Path $StormEnvPath) { + . $StormEnvPath; +} + +$ArgsForProcess = @(([io.path]::combine("$STORM_BIN_DIR", "storm.py"))) + $args +& python3 $ArgsForProcess + +exit $LastExitCode diff --git a/bin/storm.py b/bin/storm.py new file mode 100755 index 00000000000..956abde0018 --- /dev/null +++ b/bin/storm.py @@ -0,0 +1,1436 @@ +#!/usr/bin/env python3 + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import argparse +import json +import os +import shlex +import subprocess +import sys +from random import randint + +from argparse import HelpFormatter +from operator import attrgetter + +if sys.version_info[0] == 2: + raise Exception("Python version 2 is not supported. 
Please download and use python3") + +import configparser +from urllib.parse import quote_plus + + +class SortingHelpFormatter(HelpFormatter): + def add_arguments(self, actions): + actions = sorted(actions, key=attrgetter('option_strings')) + super(SortingHelpFormatter, self).add_arguments(actions) + + +def is_windows(): + return sys.platform.startswith('win') + + +def identity(x): + return x + + +def cygpath(x): + command = ["cygpath", "-wp", x] + p = subprocess.Popen(command, stdout=subprocess.PIPE) + output, errors = p.communicate() + lines = output.split(os.linesep) + return lines[0] + + +def get_config_opts(storm_config_opts): + return "-Dstorm.options=" + ','.join([quote_plus(s) for s in storm_config_opts]) + + +def get_jars_full(adir): + files = [] + if os.path.isdir(adir): + files = os.listdir(adir) + elif os.path.exists(adir): + files = [adir] + + return [os.path.join(adir, f) for f in files if f.endswith(".jar")] + + +def get_wildcard_dir(path): + """If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.""" + ret = [] + if os.path.isdir(path): + ret = [(os.path.join(path, "*"))] + elif os.path.exists(path): + ret = [path] + return ret + + +def get_java_cmd(): + cmd = 'java' if not is_windows() else 'java.exe' + if JAVA_HOME: + cmd = os.path.join(JAVA_HOME, 'bin', cmd) + return cmd + + +def confvalue(name, storm_config_opts, extrapaths, overriding_conf_file=None, daemon=True): + command = [ + JAVA_CMD, "-client", get_config_opts(storm_config_opts), + "-Dstorm.conf.file=" + (overriding_conf_file if overriding_conf_file else ""), + "-cp", get_classpath(extrajars=extrapaths, daemon=daemon), "org.apache.storm.command.ConfigValue", name + ] + output = subprocess.Popen(command, stdout=subprocess.PIPE).communicate()[0] + if not isinstance(output, str): + output = output.decode('utf-8') + lines = output.split(os.linesep) + for line in lines: + tokens = line.split(" ") + if tokens[0] == "VALUE:": + return " ".join(tokens[1:]) + return "" + + +def get_classpath(extrajars, daemon=True, client=False): + ret = get_wildcard_dir(STORM_DIR) + if client: + ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR)) + else: + ret.extend(get_wildcard_dir(STORM_LIB_DIR)) + ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib"))) + if daemon: + ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon"))) + if STORM_EXT_CLASSPATH: + ret.append(STORM_EXT_CLASSPATH) + if daemon and STORM_EXT_CLASSPATH_DAEMON: + ret.append(STORM_EXT_CLASSPATH_DAEMON) + ret.extend(extrajars) + return NORMAL_CLASS_PATH(os.pathsep.join(ret)) + + +def init_storm_env(within_unittest=False): + + global NORMAL_CLASS_PATH, STORM_DIR, USER_CONF_DIR, STORM_CONF_DIR, STORM_WORKER_LIB_DIR, STORM_LIB_DIR,\ + STORM_TOOLS_LIB_DIR, STORM_WEBAPP_LIB_DIR, STORM_BIN_DIR, STORM_LOG4J2_CONF_DIR, STORM_SUPERVISOR_LOG_FILE,\ + CLUSTER_CONF_DIR, JAR_JVM_OPTS, JAVA_HOME, JAVA_CMD, CONF_FILE, STORM_EXT_CLASSPATH, \ + STORM_EXT_CLASSPATH_DAEMON, LOCAL_TTL_DEFAULT + + NORMAL_CLASS_PATH = cygpath if sys.platform == 'cygwin' else identity + STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2]) + USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm") + STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None) + + CLUSTER_CONF_DIR = STORM_CONF_DIR if STORM_CONF_DIR else os.path.join(STORM_DIR, "conf") + + if not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml")): + USER_CONF_DIR = CLUSTER_CONF_DIR + + STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker") + STORM_LIB_DIR = 
os.path.join(STORM_DIR, "lib")
+
+    STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
+    STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
+    STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
+    STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
+    STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
+
+    CONF_FILE = ""
+    JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
+    JAVA_HOME = os.getenv('JAVA_HOME', None)
+    JAVA_CMD = get_java_cmd()
+
+    if JAVA_HOME and not os.path.exists(JAVA_CMD):
+        print(f"ERROR: JAVA_HOME is invalid. Could not find bin/java at {JAVA_HOME}.")
+        sys.exit(1)
+
+    if not (within_unittest or os.path.exists(STORM_LIB_DIR)):
+        print("*" * 20)
+        print('''The storm client can only be run from within a release.
+You appear to be trying to run the client from a checkout of Storm's source code.
+You can download a Storm release at https://storm.apache.org/downloads.html''')
+        print("*" * 20)
+        sys.exit(1)
+
+    STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
+    STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
+    LOCAL_TTL_DEFAULT = "20"
+
+    ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
+    if not os.path.isfile(ini_file):
+        return
+    config = configparser.ConfigParser()
+    config.optionxform = str
+    config.read(ini_file)
+    options = config.options('environment')
+    for option in options:
+        value = config.get('environment', option)
+        os.environ[option] = value
+
+
+def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir,
+                         proxy_url, proxy_username, proxy_password):
+    if not artifacts:
+        return {}
+
+    print(f"Resolving dependencies on demand: artifacts ({artifacts}) with repositories ({artifact_repositories})")
+
+    if maven_local_repos_dir:
+        print(f"Local repository directory: {maven_local_repos_dir}")
+
+    if proxy_url:
+        print(f"Proxy information: url ({proxy_url}) username ({proxy_username})")
+
+    sys.stdout.flush()
+
+    # the storm-submit module doesn't rely on storm-core and related libs
+    extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
+    classpath = NORMAL_CLASS_PATH(os.pathsep.join(extrajars))
+
+    command = [
+        JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
+    ]
+
+    command.extend(["--artifacts", artifacts])
+    command.extend(["--artifactRepositories", artifact_repositories])
+
+    if maven_local_repos_dir is not None:
+        command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
+
+    if proxy_url:
+        command.extend(["--proxyUrl", proxy_url])
+        if proxy_username:
+            command.extend(["--proxyUsername", proxy_username])
+            command.extend(["--proxyPassword", proxy_password])
+
+    p = subprocess.Popen(command, stdout=subprocess.PIPE)
+    output, errors = p.communicate()
+    if p.returncode != 0:
+        raise RuntimeError(f"dependency handler returns non-zero code: code<{p.returncode}> syserr<{errors}>")
+
+    if not isinstance(output, str):
+        output = output.decode('utf-8')
+
+    # For debug purposes, uncomment when you need to debug DependencyResolver
+    # print(f"Resolved dependencies: {output}")
+
+    try:
+        out_dict = json.loads(output)
+        return out_dict
+    except:
+        raise RuntimeError(f"dependency handler returns non-json response: sysout<{output}>", )
+
+
+def exec_storm_class(klass, storm_config_opts, jvmtype="-server", jvmopts=[],
+                     extrajars=[], main_class_args=[], fork=False, daemon=True, client=False, daemonName="",
+                     overriding_conf_file=None):
+    storm_log_dir = 
confvalue("storm.log.dir", storm_config_opts=storm_config_opts, + extrapaths=[CLUSTER_CONF_DIR], overriding_conf_file=overriding_conf_file) + if storm_log_dir is None or storm_log_dir in ["null", ""]: + storm_log_dir = os.path.join(STORM_DIR, "logs") + all_args = [ + JAVA_CMD, jvmtype, + f"-Ddaemon.name={daemonName}", + get_config_opts(storm_config_opts), + f"-Dstorm.home={STORM_DIR}", + f"-Dstorm.log.dir={storm_log_dir}", + "-Djava.library.path=" + confvalue("java.library.path", storm_config_opts, extrajars, daemon=daemon), + "-Dstorm.conf.file=" + (overriding_conf_file if overriding_conf_file else ""), + "-cp", get_classpath(extrajars, daemon, client=client), + ] + jvmopts + [klass] + list(main_class_args) + print("Running: " + " ".join(all_args)) + sys.stdout.flush() + exit_code = 0 + if fork: + exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args) + elif is_windows(): + # handling whitespaces in JAVA_CMD + try: + process = subprocess.Popen(all_args, stderr=sys.stderr, stdout=sys.stdout) + process.wait() + sys.exit(process.returncode) + except subprocess.CalledProcessError as e: + print(e.output) + sys.exit(e.returncode) + else: + os.execvp(JAVA_CMD, all_args) + return exit_code + + +def run_client_jar(klass, args, daemon=False, client=True, extrajvmopts=[]): + local_jars = args.jars.split(",") + jarfile = args.topology_jar_path + + artifact_to_file_jars = resolve_dependencies( + args.artifacts, args.artifactRepositories, + args.mavenLocalRepositoryDirectory, args.proxyUrl, + args.proxyUsername, args.proxyPassword + ) + + extra_jars = [jarfile, USER_CONF_DIR, STORM_BIN_DIR] + extra_jars.extend(local_jars) + extra_jars.extend(artifact_to_file_jars.values()) + exec_storm_class( + klass, args.storm_config_opts, + jvmtype="-client", + extrajars=extra_jars, + main_class_args=args.main_args, + daemon=daemon, + client=client, + jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] + + ["-Dstorm.dependency.jars=" + ",".join(local_jars)] + + ["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)], + overriding_conf_file=args.config) + + +def print_localconfvalue(args): + print(args.conf_name + ": " + confvalue(args.conf_name, args.storm_config_opts, + [USER_CONF_DIR], overriding_conf_file=args.config)) + + +def print_remoteconfvalue(args): + print(args.conf_name + ": " + confvalue(args.conf_name, args.storm_config_opts, + [CLUSTER_CONF_DIR], overriding_conf_file=args.config)) + + +def initialize_main_command(): + main_parser = argparse.ArgumentParser(prog="storm", formatter_class=SortingHelpFormatter) + + subparsers = main_parser.add_subparsers(help="") + + initialize_jar_subcommand(subparsers) + initialize_localconfvalue_subcommand(subparsers) + initialize_remoteconfvalue_subcommand(subparsers) + initialize_local_subcommand(subparsers) + initialize_kill_subcommand(subparsers) + initialize_upload_credentials_subcommand(subparsers) + initialize_blobstore_subcommand(subparsers) + initialize_heartbeats_subcommand(subparsers) + initialize_activate_subcommand(subparsers) + initialize_set_log_level_subcommand(subparsers) + initialize_listtopos_subcommand(subparsers) + initialize_deactivate_subcommand(subparsers) + initialize_rebalance_subcommand(subparsers) + initialize_get_errors_subcommand(subparsers) + initialize_healthcheck_subcommand(subparsers) + initialize_kill_workers_subcommand(subparsers) + initialize_admin_subcommand(subparsers) + initialize_shell_subcommand(subparsers) + initialize_repl_subcommand(subparsers) + initialize_nimbus_subcommand(subparsers) + 
initialize_pacemaker_subcommand(subparsers)
+    initialize_supervisor_subcommand(subparsers)
+    initialize_ui_subcommand(subparsers)
+    initialize_logviewer_subcommand(subparsers)
+    initialize_drpc_client_subcommand(subparsers)
+    initialize_drpc_subcommand(subparsers)
+    initialize_dev_zookeeper_subcommand(subparsers)
+    initialize_version_subcommand(subparsers)
+    initialize_classpath_subcommand(subparsers)
+    initialize_server_classpath_subcommand(subparsers)
+    initialize_monitor_subcommand(subparsers)
+
+    return main_parser
+
+
+def initialize_localconfvalue_subcommand(subparsers):
+    command_help = '''Prints out the value for conf-name in the local Storm configs.
+    The local Storm configs are the ones in ~/.storm/storm.yaml merged
+    in with the configs in defaults.yaml.'''
+
+    sub_parser = subparsers.add_parser("localconfvalue", help=command_help, formatter_class=SortingHelpFormatter)
+    sub_parser.add_argument("conf_name")
+    sub_parser.set_defaults(func=print_localconfvalue)
+    add_common_options(sub_parser)
+
+
+def initialize_remoteconfvalue_subcommand(subparsers):
+    command_help = '''Prints out the value for conf-name in the cluster's Storm configs.
+    The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
+    merged in with the configs in defaults.yaml.
+
+    This command must be run on a cluster machine.'''
+
+    sub_parser = subparsers.add_parser("remoteconfvalue", help=command_help, formatter_class=SortingHelpFormatter)
+    sub_parser.add_argument("conf_name")
+    sub_parser.set_defaults(func=print_remoteconfvalue)
+    add_common_options(sub_parser)
+
+
+def add_common_options(parser, main_args=True):
+    parser.add_argument("--config", default=None, help="Override default storm conf file")
+    parser.add_argument(
+        "-storm_config_opts", "-c", action="http://github.com/append", default=[],
+        help="Override storm conf properties, e.g. nimbus.ui.port=4443"
+    )
+    if main_args:
+        parser.add_argument(
+            "main_args", metavar="main_args",
+            nargs='*', help="Runs the main method with the specified arguments."
+        )
+
+
+def remove_common_options(sys_args):
+    flags_to_filter = ["-c", "-storm_config_opts", "--config"]
+    filtered_sys_args = [
+        sys_args[i] for i in range(0, len(sys_args)) if (not (sys_args[i] in flags_to_filter) and ((i<1) or
+            not (sys_args[i - 1] in flags_to_filter)))
+    ]
+    return filtered_sys_args
+
+
+def add_topology_jar_options(parser):
+    parser.add_argument(
+        "topology_jar_path", metavar="topology-jar-path",
+        help="will upload the jar at topology-jar-path when the topology is submitted."
+    )
+    parser.add_argument(
+        "topology_main_class", metavar="topology-main-class",
+        help="main class of the topology jar being submitted"
+    )
+
+
+def add_client_jar_options(parser):
+
+    parser.add_argument("--jars", help='''
+    When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
+    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
+    ''', default="")
+
+    parser.add_argument("--artifacts", help='''
+    When you want to ship maven artifacts and their transitive dependencies, you can pass them to --artifacts as a comma-separated string.
+    You can also exclude some dependencies, as you would in a maven pom.
+    Please add exclusion artifacts as a '^'-separated string after the artifact. 
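+    (For illustration, assuming multiple exclusions may be chained with '^', one artifact
+    with two exclusions could look like
+    "org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api^log4j:log4j",
+    where each exclusion is a groupId:artifactId pair.)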
+    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka.
+    ''', default="")
+
+    parser.add_argument("--artifactRepositories", help='''
+    When you need to pull the artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
+    Repository format is "<name>^<url>". '^' is taken as the separator because URLs allow various characters.
+    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver.
+    ''', default="")
+
+    parser.add_argument("--mavenLocalRepositoryDirectory", help="You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. It might help when you don't have a '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).", default="")
+
+    parser.add_argument("--proxyUrl", help="You can also provide proxy information to let the dependency resolver utilize a proxy if needed. URL representation of the proxy ('http://host:port')", default="")
+    parser.add_argument("--proxyUsername", help="username of the proxy if it requires basic auth", default="")
+    parser.add_argument("--proxyPassword", help="password of the proxy if it requires basic auth", default="")
+
+
+def initialize_jar_subcommand(subparsers):
+    jar_help = """Runs the main method of class with the specified arguments.
+    The storm worker dependencies and configs in ~/.storm are put on the classpath.
+    The process is configured so that StormSubmitter
+    (https://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
+    will upload the jar at topology-jar-path when the topology is submitted.
+
+    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process which runs the class and the workers for that topology.
+    """
+    jar_parser = subparsers.add_parser("jar", help=jar_help, formatter_class=SortingHelpFormatter)
+
+    add_topology_jar_options(jar_parser)
+    add_client_jar_options(jar_parser)
+
+    jar_parser.add_argument(
+        "--storm-server-classpath",
+        action='http://github.com/store_true',
+        help='''
+        If for some reason you need to have the full storm classpath,
+        not just the one for the worker, you may include the command line option `--storm-server-classpath`.
+        Please be careful, because this will add things to the classpath
+        that will not be on the worker classpath
+        and could result in the worker not running.'''
+    )
+
+    jar_parser.set_defaults(func=jar)
+    add_common_options(jar_parser)
+
+
+def initialize_local_subcommand(subparsers):
+    command_help = """Runs the main method of class with the specified arguments, but pointing to a local cluster.
+    The storm jars and configs in ~/.storm are put on the classpath.
+    The process is configured so that StormSubmitter
+    (https://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
+    and others will interact with a local cluster instead of the one configured by default.
+
+    Most options should work just like with the storm jar command. 
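+    As an illustrative sketch (the jar path, main class, and TTL here are hypothetical):
+
+        storm local target/mytopology.jar com.example.MyTopology --local-ttl 60
+
+    would run the topology against an in-process local cluster for about 60 seconds
+    before shutting it down.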
+ """ + sub_parser = subparsers.add_parser("local", help=command_help, formatter_class=SortingHelpFormatter) + + add_topology_jar_options(sub_parser) + add_client_jar_options(sub_parser) + + sub_parser.add_argument( + "--local-ttl", + help="sets the number of seconds the local cluster will run for before it shuts down", + default=LOCAL_TTL_DEFAULT + ) + + sub_parser.add_argument( + "--java-debug", + help="lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK" + + "e.g transport=dt_socket,address=localhost:8000 will open up a debugging server on port 8000", + default=None + ) + + sub_parser.add_argument( + "--local-zookeeper", + help="""if using an external zookeeper sets the connection string to use for it.""", + default=None + ) + + sub_parser.set_defaults(func=local) + add_common_options(sub_parser) + + +def initialize_kill_subcommand(subparsers): + command_help = """Kills the topology with the name topology-name. Storm will + first deactivate the topology's spouts for the duration of + the topology's message timeout to allow all messages currently + being processed to finish processing. Storm will then shutdown + the workers and clean up their state. + """ + sub_parser = subparsers.add_parser("kill", help=command_help, formatter_class=SortingHelpFormatter) + + sub_parser.add_argument("topology-name") + + sub_parser.add_argument( + "-w", "--wait-time-secs", + help="""override the length of time Storm waits between deactivation and shutdown""", + default=None, type=check_non_negative + ) + + sub_parser.set_defaults(func=kill) + add_common_options(sub_parser) + + +def check_non_negative(value): + ivalue = int(value) + if ivalue < 0: + raise argparse.ArgumentTypeError(f"{value} is not a non-zero integer") + return ivalue + + +def check_positive(value): + ivalue = int(value) + if ivalue <= 0: + raise argparse.ArgumentTypeError("%s is not a positive integer" % value) + return ivalue + + +def initialize_upload_credentials_subcommand(subparsers): + command_help = """Uploads a new set of credentials to a running topology.""" + sub_parser = subparsers.add_parser("upload-credentials", help=command_help, formatter_class=SortingHelpFormatter) + + sub_parser.add_argument("topology-name") + + sub_parser.add_argument( + "-f", "--file", default=None, + help="""provide a properties file with credentials in it to be uploaded""" + ) + + sub_parser.add_argument( + "-u", "--user", default=None, + help="""name of the owner of the topology (security precaution)""" + ) + + # If set, this flag will become true meaning that user expects non-empty creds to be uploaded. + # Command exits with non-zero code if uploaded creds collection is empty. + sub_parser.add_argument( + "-e", "--exception-when-empty", action='http://github.com/store_true', + help="""If specified, throw exception if there are no credentials uploaded. + Otherwise, it is default to be false""" + ) + + sub_parser.add_argument( + "cred_list", nargs='*', help="List of credkeys and their values [credkey credvalue]*" + ) + + sub_parser.set_defaults(func=upload_credentials) + add_common_options(sub_parser) + +def initialize_blobstore_subcommand(subparsers): + sub_parser = subparsers.add_parser("blobstore", formatter_class=SortingHelpFormatter) + command_help = """ + For example, the following would create a mytopo:data.tgz key using the data + stored in data.tgz. User alice would have full access, bob would have + read/write access and everyone else would have read access. 
+    storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
+    """
+    sub_sub_parsers = sub_parser.add_subparsers(help=command_help)
+
+    list_parser = sub_sub_parsers.add_parser(
+        "list", help="lists blobs currently in the blob store", formatter_class=SortingHelpFormatter
+    )
+    list_parser.add_argument(
+        "keys", nargs='*')
+    add_common_options(list_parser, main_args=False)
+
+    cat_parser = sub_sub_parsers.add_parser(
+        "cat", help="read a blob and then write it either to a file or to STDOUT (requires read access).", formatter_class=SortingHelpFormatter
+    )
+    cat_parser.add_argument("KEY")
+    cat_parser.add_argument("-f", '--FILE', default=None)
+    add_common_options(cat_parser)
+
+    create_parser = sub_sub_parsers.add_parser(
+        "create", help="create a new blob. Contents come from a FILE or STDIN", formatter_class=SortingHelpFormatter
+    )
+    create_parser.add_argument("KEY")
+    create_parser.add_argument("-f", '--file', default=None)
+    create_parser.add_argument(
+        "-a", '--acl', default=None,
+        help="ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list."
+    )
+    create_parser.add_argument("-r", "--replication-factor", default=None, type=check_positive)
+    add_common_options(create_parser)
+
+    update_parser = sub_sub_parsers.add_parser(
+        "update", help="update the contents of a blob. Contents come from a FILE or STDIN (requires write access).", formatter_class=SortingHelpFormatter,
+    )
+    update_parser.add_argument("KEY")
+    update_parser.add_argument("-f", '--FILE', default=None)
+    add_common_options(update_parser)
+
+    delete_parser = sub_sub_parsers.add_parser(
+        "delete", help="delete an entry from the blob store (requires write access).", formatter_class=SortingHelpFormatter
+    )
+    delete_parser.add_argument("KEY")
+    add_common_options(delete_parser)
+
+    set_acl_parser = sub_sub_parsers.add_parser(
+        "set-acl", help="set ACLs for the given key", formatter_class=SortingHelpFormatter
+    )
+    set_acl_parser.add_argument("KEY")
+    set_acl_parser.add_argument(
+        "-s", '--set', default=None,
+        help="""ACL is in the form [uo]:[username]:[r-][w-][a-] and
+        can be a comma-separated list (requires admin access)."""
+    )
+    add_common_options(set_acl_parser)
+
+    replication_parser = sub_sub_parsers.add_parser(
+        "replication", formatter_class=SortingHelpFormatter
+    )
+    replication_parser.add_argument("KEY")
+    replication_parser.add_argument(
+        "--read", action="http://github.com/store_true", help="Used to read the replication factor of the blob",
+        default=None
+    )
+    replication_parser.add_argument(
+        "--update", action="http://github.com/store_true", help="Used to update the replication factor of a blob.",
+        default=None
+    )
+    replication_parser.add_argument("-r", "--replication-factor", default=None, type=check_positive)
+    add_common_options(replication_parser)
+
+    sub_parser.set_defaults(func=blob)
+    add_common_options(sub_parser)
+
+
+def initialize_heartbeats_subcommand(subparsers):
+    sub_parser = subparsers.add_parser("heartbeats")
+    sub_sub_parsers = sub_parser.add_subparsers()
+
+    list_parser = sub_sub_parsers.add_parser(
+        "list", help="lists heartbeat nodes under PATH currently in the ClusterState", formatter_class=SortingHelpFormatter
+    )
+    list_parser.add_argument("PATH")
+
+    get_parser = sub_sub_parsers.add_parser(
+        "get", help="Get the heartbeat data at PATH", formatter_class=SortingHelpFormatter
+    )
+    get_parser.add_argument("PATH")
+    sub_parser.set_defaults(func=heartbeats)
+    add_common_options(sub_parser)
+
+
+def 
initialize_activate_subcommand(subparsers): + sub_parser = subparsers.add_parser( + "activate", help="Activates the specified topology's spouts.", formatter_class=SortingHelpFormatter + ) + + sub_parser.add_argument("topology-name") + + sub_parser.set_defaults(func=activate) + add_common_options(sub_parser) + + +def initialize_listtopos_subcommand(subparsers): + sub_parser = subparsers.add_parser( + "list", help="List the running topologies and their statuses.", formatter_class=SortingHelpFormatter + ) + + sub_parser.set_defaults(func=listtopos) + add_common_options(sub_parser) + + +def initialize_set_log_level_subcommand(subparsers): + sub_parser = subparsers.add_parser( + "set_log_level", help=""" + Dynamically change topology log levels + e.g. + ./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name + + Set the root logger's level to DEBUG for 30 seconds + + ./bin/storm set_log_level -l com.myapp=WARN topology-name + + Set the com.myapp logger's level to WARN for 30 seconds + + ./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name + + Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger + to ERROR for 123 seconds + + ./bin/storm set_log_level -r com.myOtherLogger topology-name + + Clears settings, resetting back to the original level + """, formatter_class=SortingHelpFormatter + ) + + sub_parser.add_argument("-l", action="http://github.com/append", default=[], help=""" + -l [logger name]=[log level][:optional timeout] where log level is one of: + ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF + """) + sub_parser.add_argument("-r", action="http://github.com/append", default=[], help=""" + -r [logger name] + """) + sub_parser.add_argument("topology-name") + + sub_parser.set_defaults(func=set_log_level) + add_common_options(sub_parser) + + +def initialize_deactivate_subcommand(subparsers): + sub_parser = subparsers.add_parser( + "deactivate", help="Deactivates the specified topology's spouts.", formatter_class=SortingHelpFormatter + ) + + sub_parser.add_argument("topology-name") + + sub_parser.set_defaults(func=deactivate) + add_common_options(sub_parser) + + +def initialize_rebalance_subcommand(subparsers): + command_help = """ + Sometimes you may wish to spread out the workers for a running topology. + For example, let's say you have a 10 node cluster running + 4 workers per node, and then let's say you add another 10 nodes to + the cluster. You may wish to have Storm spread out the workers for the + running topology so that each node runs 2 workers. One way to do this + is to kill the topology and resubmit it, but Storm provides a "rebalance" + command that provides an easier way to do this. + + Rebalance will first deactivate the topology for the duration of the + message timeout (overridable with the -w flag) make requested adjustments to the topology + and let the scheduler try to find a better scheduling based off of the + new situation. The topology will then return to its previous state of activation + (so a deactivated topology will still be deactivated and an activated + topology will go back to being activated). 
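+    As an illustrative sketch (the topology name and counts here are hypothetical), the
+    scenario above could be rebalanced with:
+
+        storm rebalance mytopology -w 30 -n 40
+
+    which waits 30 seconds instead of the full message timeout, keeps the 40 requested
+    workers, and lets the scheduler spread them across the enlarged cluster.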
+ """ + sub_parser = subparsers.add_parser( + "rebalance", help=command_help, formatter_class=SortingHelpFormatter + ) + + sub_parser.add_argument( + "-w", "--wait-time-secs", + help="time to wait before starting to rebalance", + default=None, type=check_non_negative + ) + + sub_parser.add_argument( + "-n", "--num-workers", default=None, + help="change the number of requested workers", type=check_positive + ) + + sub_parser.add_argument( + "-e", "--executor", action="http://github.com/append", default=[], + help="change the number of executors for a given component e.g. --executor component_name=6" + ) + + sub_parser.add_argument( + "-r", "--resources", default=None, + help=""" + change the resources each component is requesting as used by the resource aware scheduler + e.g '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}' + """ + ) + + sub_parser.add_argument( + "-t", "--topology-conf", default=None, + help="change the topology conf" + ) + + sub_parser.add_argument("topology-name") + + sub_parser.set_defaults(func=rebalance) + add_common_options(sub_parser) + + +def initialize_get_errors_subcommand(subparsers): + sub_parser = subparsers.add_parser( + "get-errors", help="""Get the latest error from the running topology. The returned result contains + the key value pairs for component-name and component-error for the components in error. + The result is returned in json format.""", formatter_class=SortingHelpFormatter + ) + + sub_parser.add_argument("topology-name") + + sub_parser.set_defaults(func=get_errors) + add_common_options(sub_parser) + + +def initialize_healthcheck_subcommand(subparsers): + sub_parser = subparsers.add_parser( + "node-health-check", help="""Run health checks on the local supervisor.""", formatter_class=SortingHelpFormatter + ) + + sub_parser.set_defaults(func=healthcheck) + add_common_options(sub_parser) + + +def initialize_kill_workers_subcommand(subparsers): + sub_parser = subparsers.add_parser( + "kill_workers", help="""Kill the workers running on this supervisor. This command should be run + on a supervisor node. If the cluster is running in secure mode, then user needs + to have admin rights on the node to be able to successfully kill all workers.""", formatter_class=SortingHelpFormatter + ) + + sub_parser.set_defaults(func=kill_workers) + add_common_options(sub_parser) + + +def initialize_admin_subcommand(subparsers): + sub_parser = subparsers.add_parser("admin", help="""The storm admin command provides access to several operations that can help + an administrator debug or fix a cluster.""", formatter_class=SortingHelpFormatter) + sub_sub_parsers = sub_parser.add_subparsers() + + remove_sub_sub_parser = sub_sub_parsers.add_parser( + "remove_corrupt_topologies", help="""This command should be run on a nimbus node as + the same user nimbus runs as. It will go directly to zookeeper + blobstore + and find topologies that appear to be corrupted because of missing blobs. + It will kill those topologies.""", formatter_class=SortingHelpFormatter + ) + + add_common_options(remove_sub_sub_parser) + + zk_cli_parser = sub_sub_parsers.add_parser( + "zk_cli", help="""This command will launch a zookeeper cli pointing to the + storm zookeeper instance logged in as the nimbus user. 
It should be run on + a nimbus server as the user nimbus runs as.""", formatter_class=SortingHelpFormatter + ) + + zk_cli_parser.add_argument( + "-s", "--server", default=None, help="""Set the connection string to use, + defaults to storm connection string""" + ) + + zk_cli_parser.add_argument( + "-t", "--time-out", default=None, help="""Set the timeout in seconds to use, defaults to storm + zookeeper timeout.""", type=check_non_negative + ) + + zk_cli_parser.add_argument( + "-w", "--write", default=None, action="http://github.com/store_true", + help="""Allow for writes, defaults to read only, we don't want to + cause problems.""" + ) + + zk_cli_parser.add_argument( + "-n", "--no-root", default=None, action="http://github.com/store_true", + help="""Don't include the storm root on the default connection string.""" + ) + + zk_cli_parser.add_argument( + "-j", "--jaas", default=None, help="""Include a jaas file that should be used when + authenticating with ZK defaults to the + java.security.auth.login.config conf""" + ) + + add_common_options(zk_cli_parser) + + creds_parser = sub_sub_parsers.add_parser( + "creds", help="""Print the credential keys for a topology.""", formatter_class=SortingHelpFormatter + ) + + creds_parser.add_argument("topology_id") + add_common_options(creds_parser) + + sub_parser.set_defaults(func=admin) + add_common_options(sub_parser) + + +def initialize_shell_subcommand(subparsers): + command_help = """ + Archives resources to jar and uploads jar to Nimbus, and executes following arguments on "local". Useful for non JVM languages. + eg: `storm shell resources/ python3 topology.py arg1 arg2`""" + + sub_parser = subparsers.add_parser("shell", help=command_help, formatter_class=SortingHelpFormatter) + + sub_parser.add_argument("resourcesdir") + sub_parser.add_argument("command") + sub_parser.add_argument("args", nargs='*', default=[]) + + sub_parser.set_defaults(func=shell) + add_common_options(sub_parser, main_args=False) + + +def initialize_repl_subcommand(subparsers): + command_help = """ + DEPRECATED: This subcommand may be removed in a future release. + Opens up a Clojure REPL with the storm jars and configuration + on the classpath. Useful for debugging.""" + sub_parser = subparsers.add_parser("repl", help=command_help, formatter_class=SortingHelpFormatter) + + sub_parser.set_defaults(func=repl) + add_common_options(sub_parser) + + +def initialize_nimbus_subcommand(subparsers): + command_help = """ + Launches the nimbus daemon. This command should be run under + supervision with a tool like daemontools or monit. + + See Setting up a Storm cluster for more information. + (https://storm.apache.org/documentation/Setting-up-a-Storm-cluster) + """ + sub_parser = subparsers.add_parser("nimbus", help=command_help, formatter_class=SortingHelpFormatter) + sub_parser.set_defaults(func=nimbus) + add_common_options(sub_parser) + + +def initialize_pacemaker_subcommand(subparsers): + command_help = """ + Launches the Pacemaker daemon. This command should be run under + supervision with a tool like daemontools or monit. + + See Setting up a Storm cluster for more information. + (https://storm.apache.org/documentation/Setting-up-a-Storm-cluster) + """ + sub_parser = subparsers.add_parser("pacemaker", help=command_help, formatter_class=SortingHelpFormatter) + sub_parser.set_defaults(func=pacemaker) + add_common_options(sub_parser) + + +def initialize_supervisor_subcommand(subparsers): + command_help = """ + Launches the supervisor daemon. 
This command should be run + under supervision with a tool like daemontools or monit. + + See Setting up a Storm cluster for more information. + (https://storm.apache.org/documentation/Setting-up-a-Storm-cluster) + """ + sub_parser = subparsers.add_parser("supervisor", help=command_help, formatter_class=SortingHelpFormatter) + sub_parser.set_defaults(func=supervisor) + add_common_options(sub_parser) + +def initialize_ui_subcommand(subparsers): + command_help = """ + Launches the UI daemon. The UI provides a web interface for a Storm + cluster and shows detailed stats about running topologies. This command + should be run under supervision with a tool like daemontools or monit. + + See Setting up a Storm cluster for more information. + (https://storm.apache.org/documentation/Setting-up-a-Storm-cluster) + """ + sub_parser = subparsers.add_parser("ui", help=command_help, formatter_class=SortingHelpFormatter) + sub_parser.set_defaults(func=ui) + add_common_options(sub_parser) + + +def initialize_logviewer_subcommand(subparsers): + command_help = """ + Launches the log viewer daemon. It provides a web interface for viewing + storm log files. This command should be run under supervision with a + tool like daemontools or monit. + + See Setting up a Storm cluster for more information. + (https://storm.apache.org/documentation/Setting-up-a-Storm-cluster) + """ + sub_parser = subparsers.add_parser("logviewer", help=command_help, formatter_class=SortingHelpFormatter) + sub_parser.set_defaults(func=logviewer) + add_common_options(sub_parser) + + +def initialize_drpc_client_subcommand(subparsers): + command_help = """ + Provides a very simple way to send DRPC requests. The server and port are picked from the configs. + """ + + sub_parser = subparsers.add_parser("drpc-client", help=command_help, formatter_class=SortingHelpFormatter) + + sub_parser.add_argument( + "-f", "--function", default=None, help="""If the -f argument is supplied to set the function name all of the arguments are treated + as arguments to the function. If no function is given the arguments must + be pairs of function argument.""" + ) + sub_parser.add_argument("function_arguments", nargs='*', default=[]) + + sub_parser.set_defaults(func=drpc_client) + add_common_options(sub_parser, main_args=False) + + +def initialize_drpc_subcommand(subparsers): + command_help = """ + Launches a DRPC daemon. This command should be run under supervision + with a tool like daemontools or monit. + + See Distributed RPC for more information. + (https://storm.apache.org/documentation/Distributed-RPC) + """ + sub_parser = subparsers.add_parser("drpc", help=command_help, formatter_class=SortingHelpFormatter) + sub_parser.set_defaults(func=drpc) + add_common_options(sub_parser) + + +def initialize_dev_zookeeper_subcommand(subparsers): + command_help = """ + Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and + "storm.zookeeper.port" as its port. This is only intended for development/testing, the + Zookeeper instance launched is not configured to be used in production. 
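+    For example (a sketch; the port override is hypothetical and uses the generic -c option):
+
+        storm dev-zookeeper -c storm.zookeeper.port=2181
+
+    launches the server in the foreground using the configured local path and port.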
+ """ + sub_parser = subparsers.add_parser("dev-zookeeper", help=command_help, formatter_class=SortingHelpFormatter) + sub_parser.set_defaults(func=dev_zookeeper) + add_common_options(sub_parser) + + +def initialize_version_subcommand(subparsers): + command_help = """Prints the version number of this Storm release.""" + sub_parser = subparsers.add_parser("version", help=command_help, formatter_class=SortingHelpFormatter) + sub_parser.set_defaults(func=version) + add_common_options(sub_parser) + + +def initialize_classpath_subcommand(subparsers): + command_help = """Prints the classpath used by the storm client when running commands.""" + sub_parser = subparsers.add_parser("classpath", help=command_help, formatter_class=SortingHelpFormatter) + sub_parser.set_defaults(func=print_classpath) + add_common_options(sub_parser) + + +def initialize_server_classpath_subcommand(subparsers): + command_help = """Prints the classpath used by the storm servers when running commands.""" + sub_parser = subparsers.add_parser("server_classpath", help=command_help, formatter_class=SortingHelpFormatter) + sub_parser.set_defaults(func=print_server_classpath) + add_common_options(sub_parser) + + +def initialize_monitor_subcommand(subparsers): + command_help = """Monitor given topology's throughput interactively.""" + sub_parser = subparsers.add_parser("monitor", help=command_help, formatter_class=SortingHelpFormatter) + + sub_parser.add_argument("topology-name") + sub_parser.add_argument( + "-i", "--interval", type=check_positive, default=None, + help="""By default, poll-interval is 4 seconds""" + ) + sub_parser.add_argument("-m", "--component", default=None) + sub_parser.add_argument("-s", "--stream", default=None) + sub_parser.add_argument("-w", "--watch", default=None) + sub_parser.set_defaults(func=monitor) + add_common_options(sub_parser) + + +def jar(args): + run_client_jar( + args.topology_main_class, args, + client=not args.storm_server_classpath, daemon=False) + + +def local(args): + extrajvmopts = ["-Dstorm.local.sleeptime=" + args.local_ttl] + if args.java_debug: + extrajvmopts += ["-agentlib:jdwp=" + args.java_debug] + args.main_args = [args.topology_main_class] + args.main_args + run_client_jar( + "org.apache.storm.LocalCluster", args, + client=False, daemon=False, extrajvmopts=extrajvmopts) + +def kill(args): + exec_storm_class( + "org.apache.storm.command.KillTopology", + main_class_args=remove_common_options(sys.argv[2:]), storm_config_opts=args.storm_config_opts, + jvmtype="-client", + extrajars=[USER_CONF_DIR, STORM_BIN_DIR], + overriding_conf_file=args.config) + + +def upload_credentials(args): + if len(args.cred_list) % 2 != 0: + raise argparse.ArgumentTypeError("please provide a list of cred key and value pairs " + args.cred_list) + exec_storm_class( + "org.apache.storm.command.UploadCredentials", + main_class_args=remove_common_options(sys.argv[2:]), storm_config_opts=args.storm_config_opts, + jvmtype="-client", + extrajars=[USER_CONF_DIR, STORM_BIN_DIR], + overriding_conf_file=args.config) + + +def blob(args): + if hasattr(args, "update") and args.update and not args.replication_factor: + raise argparse.ArgumentTypeError("Replication factor needed when doing blob update") + exec_storm_class( + "org.apache.storm.command.Blobstore", + main_class_args=remove_common_options(sys.argv[2:]), storm_config_opts=args.storm_config_opts, + jvmtype="-client", + extrajars=[USER_CONF_DIR, STORM_BIN_DIR], + overriding_conf_file=args.config) + + +def heartbeats(args): + exec_storm_class( + 
"org.apache.storm.command.Heartbeats", + main_class_args=remove_common_options(sys.argv[2:]), storm_config_opts=args.storm_config_opts, + jvmtype="-client", + extrajars=[USER_CONF_DIR, STORM_BIN_DIR], + overriding_conf_file=args.config) + + +def activate(args): + exec_storm_class( + "org.apache.storm.command.Activate", + main_class_args=remove_common_options(sys.argv[2:]), storm_config_opts=args.storm_config_opts, + jvmtype="-client", + extrajars=[USER_CONF_DIR, STORM_BIN_DIR], + overriding_conf_file=args.config) + + +def listtopos(args): + exec_storm_class( + "org.apache.storm.command.ListTopologies", + main_class_args=remove_common_options(sys.argv[2:]), storm_config_opts=args.storm_config_opts, + jvmtype="-client", + extrajars=[USER_CONF_DIR, STORM_BIN_DIR], + overriding_conf_file=args.config) + + +def set_log_level(args): + for log_level in args.l: + try: + _, new_value = log_level.split("=") + if ":" in new_value: + _, timeout = new_value.split(":") + int(timeout) + except: + raise argparse.ArgumentTypeError("Should be in the form[logger name]=[log level][:optional timeout]") + exec_storm_class( + "org.apache.storm.command.SetLogLevel", + main_class_args=remove_common_options(sys.argv[2:]), storm_config_opts=args.storm_config_opts, + jvmtype="-client", + extrajars=[USER_CONF_DIR, STORM_BIN_DIR], + overriding_conf_file=args.config) + + +def deactivate(args): + exec_storm_class( + "org.apache.storm.command.Deactivate", + main_class_args=remove_common_options(sys.argv[2:]), storm_config_opts=args.storm_config_opts, + jvmtype="-client", + extrajars=[USER_CONF_DIR, STORM_BIN_DIR], + overriding_conf_file=args.config) + + +def rebalance(args): + for executor in args.executor: + try: + _, new_value = executor.split("=") + new_value = int(new_value) + if new_value < 0: + raise argparse.ArgumentTypeError("Executor count should be > 0") + except: + raise argparse.ArgumentTypeError("Should be in the form component_name=new_executor_count") + exec_storm_class( + "org.apache.storm.command.Rebalance", + main_class_args=remove_common_options(sys.argv[2:]), storm_config_opts=args.storm_config_opts, + jvmtype="-client", + extrajars=[USER_CONF_DIR, STORM_BIN_DIR], + overriding_conf_file=args.config) + + +def get_errors(args): + exec_storm_class( + "org.apache.storm.command.GetErrors", + main_class_args=remove_common_options(sys.argv[2:]), storm_config_opts=args.storm_config_opts, + jvmtype="-client", + extrajars=[USER_CONF_DIR, STORM_BIN_DIR], + overriding_conf_file=args.config) + + +def healthcheck(args): + exec_storm_class( + "org.apache.storm.command.HealthCheck", + main_class_args=remove_common_options(sys.argv[2:]), storm_config_opts=args.storm_config_opts, + jvmtype="-client", + extrajars=[USER_CONF_DIR, STORM_BIN_DIR], + overriding_conf_file=args.config) + + +def kill_workers(args): + exec_storm_class( + "org.apache.storm.command.KillWorkers", + main_class_args=remove_common_options(sys.argv[2:]), storm_config_opts=args.storm_config_opts, + jvmtype="-client", + extrajars=[USER_CONF_DIR, STORM_BIN_DIR], + overriding_conf_file=args.config) + + +def admin(args): + exec_storm_class( + "org.apache.storm.command.AdminCommands", + main_class_args=remove_common_options(sys.argv[2:]), storm_config_opts=args.storm_config_opts, + jvmtype="-client", + extrajars=[USER_CONF_DIR, STORM_BIN_DIR], + overriding_conf_file=args.config) + + +def shell(args): + tmpjarpath = "stormshell" + str(randint(0, 10000000)) + ".jar" + os.system("jar cf %s %s" % (tmpjarpath, args.resourcesdir)) + runnerargs = [tmpjarpath, 
args.command] + runnerargs.extend(args.args) + exec_storm_class( + "org.apache.storm.command.ShellSubmission", storm_config_opts=args.storm_config_opts, + main_class_args=runnerargs, + jvmtype="-client", + extrajars=[USER_CONF_DIR], + fork=True, + overriding_conf_file=args.config) + os.system("rm " + tmpjarpath) + + +def repl(args): + cppaths = [CLUSTER_CONF_DIR] + exec_storm_class( + "clojure.main", storm_config_opts=args.storm_config_opts, jvmtype="-client", extrajars=cppaths, + overriding_conf_file=args.config + ) + + +def get_log4j2_conf_dir(storm_config_opts, args): + cppaths = [CLUSTER_CONF_DIR] + storm_log4j2_conf_dir = confvalue( + "storm.log4j2.conf.dir", storm_config_opts=storm_config_opts, + extrapaths=cppaths, overriding_conf_file=args.config + ) + if not storm_log4j2_conf_dir or storm_log4j2_conf_dir == "null": + storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR + elif not os.path.isabs(storm_log4j2_conf_dir): + storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir) + return storm_log4j2_conf_dir + + +def nimbus(args): + cppaths = [CLUSTER_CONF_DIR] + storm_config_opts = get_config_opts(args.storm_config_opts) + jvmopts = shlex.split(confvalue( + "nimbus.childopts", storm_config_opts=storm_config_opts, extrapaths=cppaths, overriding_conf_file=args.config + )) + [ + "-Djava.deserialization.disabled=true", + "-Dlogfile.name=nimbus.log", + "-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(storm_config_opts, args), "cluster.xml"), + ] + exec_storm_class( + "org.apache.storm.daemon.nimbus.Nimbus", storm_config_opts=args.storm_config_opts, + jvmtype="-server", + daemonName="nimbus", + extrajars=cppaths, + jvmopts=jvmopts, + overriding_conf_file=args.config) + + +def pacemaker(args): + cppaths = [CLUSTER_CONF_DIR] + storm_config_opts = get_config_opts(args.storm_config_opts) + + jvmopts = shlex.split(confvalue( + "pacemaker.childopts", storm_config_opts=storm_config_opts, + extrapaths=cppaths, overriding_conf_file=args.config) + ) + [ + "-Djava.deserialization.disabled=true", + "-Dlogfile.name=pacemaker.log", + "-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(storm_config_opts, args), "cluster.xml"), + ] + exec_storm_class( + "org.apache.storm.pacemaker.Pacemaker", storm_config_opts=args.storm_config_opts, + jvmtype="-server", + daemonName="pacemaker", + extrajars=cppaths, + jvmopts=jvmopts, + overriding_conf_file=args.config) + + +def supervisor(args): + cppaths = [CLUSTER_CONF_DIR] + storm_config_opts = get_config_opts(args.storm_config_opts) + jvmopts = shlex.split(confvalue( + "supervisor.childopts", storm_config_opts=storm_config_opts, + extrapaths=cppaths, overriding_conf_file=args.config) + ) + [ + "-Djava.deserialization.disabled=true", + "-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE, + "-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(storm_config_opts, args), "cluster.xml"), + ] + exec_storm_class( + "org.apache.storm.daemon.supervisor.Supervisor", storm_config_opts=args.storm_config_opts, + jvmtype="-server", + daemonName="supervisor", + extrajars=cppaths, + jvmopts=jvmopts, + overriding_conf_file=args.config) + + +def ui(args): + cppaths = [CLUSTER_CONF_DIR] + storm_config_opts = get_config_opts(args.storm_config_opts) + jvmopts = shlex.split(confvalue( + "ui.childopts", storm_config_opts=storm_config_opts, extrapaths=cppaths, overriding_conf_file=args.config) + ) + [ + "-Djava.deserialization.disabled=true", + "-Dlogfile.name=ui.log", + "-Dlog4j.configurationFile=" + 
os.path.join(get_log4j2_conf_dir(storm_config_opts, args), "cluster.xml") + ] + + allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR) + allextrajars.append(CLUSTER_CONF_DIR) + exec_storm_class( + "org.apache.storm.daemon.ui.UIServer", storm_config_opts=args.storm_config_opts, + jvmtype="-server", + daemonName="ui", + jvmopts=jvmopts, + extrajars=allextrajars, + overriding_conf_file=args.config) + + +def logviewer(args): + cppaths = [CLUSTER_CONF_DIR] + storm_config_opts = get_config_opts(args.storm_config_opts) + jvmopts = shlex.split( + confvalue( + "logviewer.childopts", storm_config_opts=storm_config_opts, + extrapaths=cppaths, overriding_conf_file=args.config + ) + ) + [ + "-Djava.deserialization.disabled=true", + "-Dlogfile.name=logviewer.log", + "-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(storm_config_opts, args), "cluster.xml") + ] + + allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR) + allextrajars.append(CLUSTER_CONF_DIR) + exec_storm_class( + "org.apache.storm.daemon.logviewer.LogviewerServer", storm_config_opts=args.storm_config_opts, + jvmtype="-server", + daemonName="logviewer", + jvmopts=jvmopts, + extrajars=allextrajars, + overriding_conf_file=args.config) + + +def drpc_client(args): + if not args.function and (len(args.function_arguments) % 2): + raise argparse.ArgumentTypeError( + "If no -f is supplied arguments need to be in the form [function arg]. " + + "This has {} args".format( + len(args.function_arguments) + ) + ) + + exec_storm_class( + "org.apache.storm.command.BasicDrpcClient", + main_class_args=remove_common_options(sys.argv[2:]), storm_config_opts=args.storm_config_opts, + jvmtype="-client", + extrajars=[USER_CONF_DIR, STORM_BIN_DIR], + overriding_conf_file=args.config) + + +def drpc(args): + cppaths = [CLUSTER_CONF_DIR] + storm_config_opts = get_config_opts(args.storm_config_opts) + jvmopts = shlex.split( + confvalue( + "drpc.childopts", storm_config_opts=storm_config_opts, extrapaths=cppaths, overriding_conf_file=args.config + ) + ) + [ + "-Djava.deserialization.disabled=true", + "-Dlogfile.name=drpc.log", + "-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(storm_config_opts, args), "cluster.xml") + ] + allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR) + allextrajars.append(CLUSTER_CONF_DIR) + exec_storm_class( + "org.apache.storm.daemon.drpc.DRPCServer", storm_config_opts=args.storm_config_opts, + jvmtype="-server", + daemonName="drpc", + jvmopts=jvmopts, + extrajars=allextrajars, + overriding_conf_file=args.config) + + +def dev_zookeeper(args): + storm_config_opts = get_config_opts(args.storm_config_opts) + jvmopts = [ + "-Dlogfile.name=dev-zookeeper.log", + "-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(storm_config_opts, args), "cluster.xml") + ] + exec_storm_class( + "org.apache.storm.command.DevZookeeper", storm_config_opts=args.storm_config_opts, + jvmtype="-server", + daemonName="dev_zookeeper", + jvmopts=jvmopts, + extrajars=[CLUSTER_CONF_DIR], + overriding_conf_file=args.config) + + +def version(args): + exec_storm_class( + "org.apache.storm.utils.VersionInfo", storm_config_opts=args.storm_config_opts, + jvmtype="-client", + extrajars=[CLUSTER_CONF_DIR], + overriding_conf_file=args.config) + + +def print_classpath(args): + print(get_classpath([], client=True)) + + +def print_server_classpath(args): + print(get_classpath([], daemon=True)) + + +def monitor(args): + exec_storm_class( + "org.apache.storm.command.Monitor", storm_config_opts=args.storm_config_opts, + 
main_class_args=remove_common_options(sys.argv[2:]),
+        jvmtype="-client",
+        extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
+
+
+def main():
+    init_storm_env()
+    storm_parser = initialize_main_command()
+    if len(sys.argv) == 1:
+        storm_parser.print_help(sys.stderr)
+        sys.exit(1)
+    raw_args, unknown_args = storm_parser.parse_known_args()
+    if hasattr(raw_args, "main_args"):
+        raw_args.main_args += unknown_args
+    raw_args.func(raw_args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/bin/test_docker_to_squash.py b/bin/test_docker_to_squash.py
new file mode 100644
index 00000000000..d7a44e95b35
--- /dev/null
+++ b/bin/test_docker_to_squash.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest import TestCase
+# import docker-to-squash as dtsq
+dtsq = __import__('docker-to-squash')  # TODO: rename docker-to-squash.py as docker_to_squash.py
+
+
+class Test(TestCase):
+    def test_shell_command(self):
+        """
+        shell_command is used by many functions in docker_to_squash.py. Ensure that it works correctly.
+        An earlier version did not return any values; this went undetected until PR https://github.com/apache/storm/pull/3475
+        :return:
+        """
+        # expect success
+        cmd = ["ls", "-l"]
+        out, err, rc = dtsq.shell_command(cmd, True, True, True, timeout_sec=10)
+        self.assertEqual(0, rc, f"Failed cmd={cmd}\nrc={rc}\nout={out}\nerr={err}")
+
+        # expect failure
+        cmd = ["badcmd", "-l"]
+        out, err, rc = dtsq.shell_command(cmd, True, True, True, timeout_sec=10)
+        self.assertNotEqual(0, rc, f"Expected to fail cmd={cmd}\nrc={rc}\nout={out}\nerr={err}")
+
+    # TODO:
+    # def test_read_image_tag_to_hash(self):
+    #     """
+    #     The base method behaves differently, since strings in python3 are always unicode. The base
+    #     method flips between byte arrays and strings, which may not always work properly in python3.
+    #     :return:
+    #     """
+    #     image_tag_to_hash = ""
+    #     hash_to_tags, tag_to_hash = dtsq.read_image_tag_to_hash(image_tag_to_hash)
+
diff --git a/bin/test_storm.py b/bin/test_storm.py
new file mode 100644
index 00000000000..11c8057b0a8
--- /dev/null
+++ b/bin/test_storm.py
@@ -0,0 +1,217 @@
+#!/usr/bin/env python3
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import unittest +import storm +import os + + +class Test(unittest.TestCase): + """ + Mostly just test coverage + """ + _testMethodName = None + _testMethodDoc = None + + def __init__(self, method_name="None"): + super().__init__() + self._testMethodName = method_name + storm.init_storm_env(within_unittest=True) + + def test_get_jars_full(self): + storm.get_jars_full(".") + + def test_get_wildcard_dir(self): + s = storm.get_wildcard_dir("./") + self.assertEqual(s, ["./*"]) + + def test_get_java_cmd(self): + s = storm.get_java_cmd() + expected = 'java' if not storm.is_windows() else 'java.exe' + if storm.JAVA_HOME: + expected = os.path.join(storm.JAVA_HOME, 'bin', expected) + self.assertEqual(s, expected) + + def test_confvalue(self): + name = 'name' + storm_config_opts = {'ui.port': '8080'} + extrapaths = [] + overriding_conf_file = None + daemon = True + s = storm.confvalue(name, storm_config_opts, extrapaths, overriding_conf_file, daemon) + expected = "" + self.assertEqual(s, expected) + + def test_get_classpath(self): + extrajars = [f"jar{x}.jar" for x in range(5)] + daemon = True + client = False + s = storm.get_classpath(extrajars, daemon, client) + expected = ":".join(extrajars) + self.assertEqual(s[-len(expected):], expected) + + def test_resolve_dependencies(self): + artifacts = "org.apache.commons.commons-api" + artifact_repositories = "maven-central" + maven_local_repos_dir = "~/.m2" + proxy_url = None + proxy_username = None + proxy_password = None + try: + output = storm.resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, + proxy_url, proxy_username, proxy_password) + except RuntimeError as ex: + print(f"Unexpected {ex=}, {type(ex)=}") + # test coverage only + + def test_exec_storm_class(self): + klass = "org.apache.storm.starter.WordCountTopology" + storm_config_opts = [] + jvmtype = "-server" + jvmopts = [] + extrajars = [] + main_class_args = [] + fork = False + daemon = True + client = False + daemonName = "" + overriding_conf_file = None + # exit_code = storm.exec_storm_class(klass, storm_config_opts=storm_config_opts, jvmtype=jvmtype, jvmopts=jvmopts, + # extrajars=extrajars, main_class_args=main_class_args, fork=fork, + # daemon=daemon, client=client, daemonName=daemonName, + # overriding_conf_file=overriding_conf_file) + + def test_run_client_jar(self): + pass + + def test_print_localconfvalue(self): + class Args: + conf_name = self._testMethodName + storm_config_opts = {self._testMethodName: "confvalue"} + config = "config/file/path.yaml" + + args = Args() + storm.print_localconfvalue(args) + + def test_print_remoteconfvalue(self): + class Args: + conf_name = self._testMethodName + storm_config_opts = {self._testMethodName: "confvalue"} + config = "config/file/path.yaml" + + args = Args() + storm.print_remoteconfvalue(args) + + def test_initialize_main_command(self): + storm.initialize_main_command() + + def test_jar(self): + pass + + def test_local(self): + pass + + def test_sql(self): + pass + + def test_kill(self): + pass + + def test_upload_credentials(self): + pass + + def test_blob(self): + pass + + def test_heartbeats(self): + 
pass + + def test_activate(self): + pass + + def test_listtopos(self): + pass + + def test_set_log_level(self): + pass + + def test_deactivate(self): + pass + + def test_rebalance(self): + pass + + def test_get_errors(self): + pass + + def test_healthcheck(self): + pass + + def test_kill_workers(self): + pass + + def test_admin(self): + pass + + def test_shell(self): + pass + + def test_repl(self): + pass + + def test_get_log4j2_conf_dir(self): + pass + + def test_nimbus(self): + pass + + def test_pacemaker(self): + pass + + def test_supervisor(self): + pass + + def test_ui(self): + pass + + def test_logviewer(self): + pass + + def test_drpc_client(self): + pass + + def test_drpc(self): + pass + + def test_dev_zookeeper(self): + pass + + def test_version(self): + pass + + def test_print_classpath(self): + storm.print_classpath(None) + + def test_print_server_classpath(self): + storm.print_server_classpath(None) + + def test_monitor(self): + pass + + diff --git a/conf/cgconfig.conf.example b/conf/cgconfig.conf.example new file mode 100644 index 00000000000..70ac4958426 --- /dev/null +++ b/conf/cgconfig.conf.example @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +mount { + cpuset = /cgroup/cpuset; + cpu = /cgroup/storm_resources; + cpuacct = /cgroup/cpuacct; + memory = /cgroup/storm_resources; + devices = /cgroup/devices; + freezer = /cgroup/freezer; + net_cls = /cgroup/net_cls; + blkio = /cgroup/blkio; +} + +group storm { + perm { + task { + uid = 500; + gid = 500; + } + admin { + uid = 500; + gid = 500; + } + } + cpu { + } +} diff --git a/conf/defaults.yaml b/conf/defaults.yaml index bb1cb040bed..4368099725c 100644 --- a/conf/defaults.yaml +++ b/conf/defaults.yaml @@ -18,11 +18,12 @@ ########### These all have default values as shown ########### Additional configuration goes into storm.yaml -java.library.path: "/usr/local/lib:/opt/local/lib:/usr/lib" +java.library.path: "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/lib64" ### storm.* configs are general configurations # the local dir is where jars are kept storm.local.dir: "storm-local" +storm.log4j2.conf.dir: "log4j2" storm.zookeeper.servers: - "localhost" storm.zookeeper.port: 2181 @@ -32,46 +33,133 @@ storm.zookeeper.connection.timeout: 15000 storm.zookeeper.retry.times: 5 storm.zookeeper.retry.interval: 1000 storm.zookeeper.retry.intervalceiling.millis: 30000 +storm.zookeeper.auth.user: null +storm.zookeeper.auth.password: null +storm.zookeeper.ssl.enable: false +storm.zookeeper.ssl.hostnameVerification: false storm.cluster.mode: "distributed" # can be distributed or local storm.local.mode.zmq: false -storm.thrift.transport: "backtype.storm.security.auth.SimpleTransportPlugin" -storm.messaging.transport: "backtype.storm.messaging.netty.Context" +storm.thrift.transport: "org.apache.storm.security.auth.SimpleTransportPlugin" +storm.thrift.socket.timeout.ms: 600000 +storm.thrift.tls.socket.timeout.ms: 600000 +storm.principal.tolocal: "org.apache.storm.security.auth.DefaultPrincipalToLocal" +storm.group.mapping.service: "org.apache.storm.security.auth.ShellBasedGroupsMapping" +storm.group.mapping.service.params: null +storm.messaging.transport: "org.apache.storm.messaging.netty.Context" +storm.nimbus.retry.times: 5 +storm.nimbus.retry.interval.millis: 2000 +storm.nimbus.retry.intervalceiling.millis: 60000 +storm.nimbus.zookeeper.acls.check: true +storm.nimbus.zookeeper.acls.fixup: true + +storm.auth.simple-white-list.users: [] +storm.cluster.state.store: "org.apache.storm.cluster.ZKStateStorageFactory" +storm.meta.serialization.delegate: "org.apache.storm.serialization.GzipThriftSerializationDelegate" +storm.codedistributor.class: "org.apache.storm.codedistributor.LocalFileSystemCodeDistributor" +storm.workers.artifacts.dir: "workers-artifacts" +storm.health.check.dir: "healthchecks" +storm.health.check.timeout.ms: 5000 +storm.disable.symlinks: false ### nimbus.* configs are for the master -nimbus.host: "localhost" +nimbus.seeds : ["localhost"] nimbus.thrift.port: 6627 +nimbus.thrift.threads: 64 nimbus.thrift.max_buffer_size: 1048576 +nimbus.thrift.tls.port: 0 +nimbus.thrift.tls.threads: 64 +nimbus.thrift.tls.max_buffer_size: 1048576 +nimbus.thrift.client.use.tls: false +nimbus.thrift.tls.transport: "org.apache.storm.security.auth.tls.TlsTransportPlugin" +nimbus.thrift.tls.client.auth.required: true +topology.worker.nimbus.thrift.client.use.tls: false nimbus.childopts: "-Xmx1024m" nimbus.task.timeout.secs: 30 nimbus.supervisor.timeout.secs: 60 nimbus.monitor.freq.secs: 10 nimbus.cleanup.inbox.freq.secs: 600 nimbus.inbox.jar.expiration.secs: 3600 +nimbus.code.sync.freq.secs: 120 nimbus.task.launch.secs: 120 -nimbus.reassign: true nimbus.file.copy.expiration.secs: 600 -nimbus.topology.validator: 
"backtype.storm.nimbus.DefaultTopologyValidator" +nimbus.topology.validator: "org.apache.storm.nimbus.DefaultTopologyValidator" +topology.min.replication.count: 1 +topology.max.replication.wait.time.sec: 60 +nimbus.credential.renewers.freq.secs: 600 +nimbus.queue.size: 100000 +scheduler.display.resource: false +nimbus.local.assignments.backend.class: "org.apache.storm.assignments.InMemoryAssignmentBackend" +nimbus.assignments.service.threads: 10 +nimbus.assignments.service.thread.queue.size: 100 +nimbus.worker.heartbeats.recovery.strategy.class: "org.apache.storm.nimbus.TimeOutWorkerHeartbeatsRecoveryStrategy" +nimbus.topology.blobstore.deletion.delay.ms: 300000 ### ui.* configs are for the master +ui.host: 0.0.0.0 ui.port: 8080 +ui.title: "Storm UI" ui.childopts: "-Xmx768m" +ui.actions.enabled: true +ui.filter: null +ui.filter.params: null +ui.users: null +ui.header.buffer.bytes: 4096 +ui.http.creds.plugin: org.apache.storm.security.auth.DefaultHttpCredentialsPlugin +ui.pagination: 20 +ui.disable.http.binding: true +ui.disable.spout.lag.monitoring: true logviewer.port: 8000 logviewer.childopts: "-Xmx128m" +logviewer.cleanup.age.mins: 10080 logviewer.appender.name: "A1" +logviewer.max.sum.worker.logs.size.mb: 4096 +logviewer.max.per.worker.logs.size.mb: 2048 +logviewer.disable.http.binding: true +logviewer.filter: null +logviewer.filter.params: null +logs.users: null drpc.port: 3772 drpc.worker.threads: 64 +drpc.max_buffer_size: 1048576 drpc.queue.size: 128 drpc.invocations.port: 3773 +drpc.invocations.threads: 64 drpc.request.timeout.secs: 600 drpc.childopts: "-Xmx768m" +drpc.http.port: 3774 +drpc.https.port: -1 +drpc.https.keystore.password: "" +drpc.https.keystore.type: "JKS" +drpc.http.creds.plugin: org.apache.storm.security.auth.DefaultHttpCredentialsPlugin +drpc.authorizer.acl.filename: "drpc-auth-acl.yaml" +drpc.authorizer.acl.strict: false +drpc.disable.http.binding: true transactional.zookeeper.root: "/transactional" transactional.zookeeper.servers: null transactional.zookeeper.port: null +## blobstore configs +supervisor.blobstore.class: "org.apache.storm.blobstore.NimbusBlobStore" +supervisor.blobstore.download.thread.count: 5 +supervisor.blobstore.download.max_retries: 3 +supervisor.localizer.cache.target.size.mb: 10240 +supervisor.localizer.cleanup.interval.ms: 30000 +supervisor.localizer.update.blob.interval.secs: 30 + +nimbus.blobstore.class: "org.apache.storm.blobstore.LocalFsBlobStore" +nimbus.blobstore.expiration.secs: 600 + +storm.blobstore.inputstream.buffer.size.bytes: 65536 +storm.blobstore.dependency.jar.upload.chunk.size.bytes: 1048576 +client.blobstore.class: "org.apache.storm.blobstore.NimbusBlobStore" +storm.blobstore.replication.factor: 3 +# For secure mode we would want to change this config to true +storm.blobstore.acl.validation.enabled: false + ### supervisor.* configs are for node supervisors # Define the amount of workers that can be run on this machine. 
Each worker is assigned a port to use for communication
 supervisor.slots.ports:
@@ -80,65 +168,263 @@ supervisor.slots.ports:
     - 6702
     - 6703
 supervisor.childopts: "-Xmx256m"
+supervisor.run.worker.as.user: false
 #how long supervisor will wait to ensure that a worker process is started
 supervisor.worker.start.timeout.secs: 120
 #how long between heartbeats until supervisor considers that worker dead and tries to restart it
 supervisor.worker.timeout.secs: 30
+#How many seconds to allow for graceful worker shutdown when killing workers before resorting to force kill
+supervisor.worker.shutdown.sleep.secs: 3
 #how frequently the supervisor checks on the status of the processes it's monitoring and restarts if necessary
 supervisor.monitor.frequency.secs: 3
 #how frequently the supervisor heartbeats to the cluster state (for nimbus)
 supervisor.heartbeat.frequency.secs: 5
+#max timeout for node worker heartbeats when the master gains leadership
+supervisor.worker.heartbeats.max.timeout.secs: 600
+#For the topology-configurable heartbeat timeout, the maximum allowed heartbeat timeout.
+worker.max.timeout.secs: 600
 supervisor.enable: true
+supervisor.supervisors: []
+supervisor.supervisors.commands: []
+supervisor.memory.capacity.mb: 4096.0
+#By convention 1 cpu core should be about 100, but this can be adjusted if needed
+# using 100 makes it simple to set the desired value to the capacity measurement
+# for single threaded bolts
+supervisor.cpu.capacity: 400.0
+
+#Supervisor thrift config
+supervisor.thrift.port: 6628
+supervisor.queue.size: 128
+supervisor.thrift.threads: 16
+supervisor.thrift.max_buffer_size: 1048576
+supervisor.thrift.socket.timeout.ms: 5000
 
 ### worker.* configs are for task workers
-worker.childopts: "-Xmx768m"
+worker.heap.memory.mb: 768
+worker.childopts: "-Xmx%HEAP-MEM%m -XX:+IgnoreUnrecognizedVMOptions -XX:+PrintGCDetails -Xloggc:artifacts/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=artifacts/heapdump"
+worker.gc.childopts: ""
+
+# Unlocking commercial features requires a special license from Oracle.
+# See http://www.oracle.com/technetwork/java/javase/terms/products/index.html
+# For this reason, profiler features are disabled by default.
+worker.profiler.enabled: false
+worker.profiler.childopts: "-XX:+UnlockCommercialFeatures -XX:+FlightRecorder"
+worker.profiler.command: "flight.bash"
 worker.heartbeat.frequency.secs: 1
-task.heartbeat.frequency.secs: 3
-task.refresh.poll.secs: 10
+# check whether dynamic log levels can be reset from DEBUG to INFO in workers
+worker.log.level.reset.poll.secs: 30
+
+# control how many worker receiver threads we need per worker
+topology.worker.receiver.thread.count: 1
 
-zmq.threads: 1
-zmq.linger.millis: 5000
-zmq.hwm: 0
+# Executor metrics reporting interval.
+# Because the UI only shows built-in metrics, this should be kept in sync with the built-in metrics
+# interval and also with the metrics consumer's collecting interval (a standalone sketch of this
+# consistency check follows below).
+# See topology.builtin.metrics.bucket.size.secs and storm.cluster.metrics.consumer.publish.interval.secs.
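The comment above asks operators to keep three reporting intervals aligned. A minimal sketch of that consistency check, assuming a plain dict holds the parsed YAML; the helper itself is hypothetical and not part of storm.py, though the config keys are the real ones:

```python
# Hypothetical helper: warn when the three metrics intervals named in
# defaults.yaml drift apart. The keys are real Storm config keys; the
# function and the plain-dict config are illustrative only.
def check_metrics_intervals(conf):
    executor = conf.get("executor.metrics.frequency.secs", 60)
    builtin = conf.get("topology.builtin.metrics.bucket.size.secs", 60)
    publish = conf.get("storm.cluster.metrics.consumer.publish.interval.secs", 60)
    if len({executor, builtin, publish}) > 1:
        print("warning: metrics intervals differ: "
              f"executor={executor}, builtin={builtin}, publish={publish}")


check_metrics_intervals({"executor.metrics.frequency.secs": 30})  # -> warning
```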
+executor.metrics.frequency.secs: 60
+
+task.heartbeat.frequency.secs: 3
+task.refresh.poll.secs: 10
+task.credentials.poll.secs: 30
+
+# Used by workers to communicate
 storm.messaging.netty.server_worker_threads: 1
 storm.messaging.netty.client_worker_threads: 1
 storm.messaging.netty.buffer_size: 5242880 #5MB buffer
-storm.messaging.netty.max_retries: 30
+storm.messaging.netty.flush_timeout_ms: 600000
+
+# The netty write buffer high watermark in bytes.
+# If the number of bytes queued in the netty's write buffer exceeds this value, the netty client will block
+# until the value falls below the low water mark.
+storm.messaging.netty.buffer.high.watermark: 16777216 # 16 MB
+# The netty write buffer low watermark in bytes.
+# Once the number of bytes queued in the write buffer has exceeded the high water mark and then
+# dropped below this value, any blocked clients will unblock and start processing further messages.
+storm.messaging.netty.buffer.low.watermark: 8388608 # 8 MB
+# Since nimbus.task.launch.secs and supervisor.worker.start.timeout.secs are 120, other workers should also wait at least that long before giving up on connecting to the other worker. The reconnection period also needs to be bigger than storm.zookeeper.session.timeout (default is 20s), so that we can abort the reconnection when the target worker is dead.
 storm.messaging.netty.max_wait_ms: 1000
 storm.messaging.netty.min_wait_ms: 100
+# If the Netty messaging layer is busy (netty internal buffer not writable), the Netty client will try to batch messages as much as possible, up to storm.messaging.netty.transfer.batch.size bytes; otherwise it will try to flush messages as soon as possible to reduce latency.
+storm.messaging.netty.transfer.batch.size: 262144
+# Sets the backlog value to specify when the channel binds to a local address
+storm.messaging.netty.socket.backlog: 500
+
+# By default, the Netty SASL authentication is set to false. Users can override and set it true for a specific topology.
+# see https://issues.apache.org/jira/browse/STORM-348 for more details
+storm.messaging.netty.authentication: false
+
+# Default plugin to use for automatic network topology discovery
+storm.network.topography.plugin: org.apache.storm.networktopography.DefaultRackDNSToSwitchMapping
+
+# default number of seconds group mapping service will cache user group
+storm.group.mapping.service.cache.duration.secs: 120
+
 ### topology.* configs are for specific executing storms
 topology.enable.message.timeouts: true
 topology.debug: false
 topology.workers: 1
 topology.acker.executors: null
+topology.ras.acker.executors.per.worker: 1
+topology.eventlogger.executors: 0
 topology.tasks: null
 # maximum amount of time a message has to complete before it's considered failed
 topology.message.timeout.secs: 30
-topology.multilang.serializer: "backtype.storm.multilang.JsonSerializer"
+topology.multilang.serializer: "org.apache.storm.multilang.JsonSerializer"
+topology.shellbolt.max.pending: 100
 topology.skip.missing.kryo.registrations: false
 topology.max.task.parallelism: null
-topology.max.spout.pending: null
+topology.max.spout.pending: null # ideally should be larger than topology.producer.batch.size. (esp.
if topology.batch.flush.interval.millis=0) topology.state.synchronization.timeout.secs: 60 topology.stats.sample.rate: 0.05 topology.builtin.metrics.bucket.size.secs: 60 -topology.fall.back.on.java.serialization: true +topology.fall.back.on.java.serialization: false topology.worker.childopts: null -topology.executor.receive.buffer.size: 1024 #batched -topology.executor.send.buffer.size: 1024 #individual messages -topology.receiver.buffer.size: 8 # setting it too high causes a lot of problems (heartbeat thread gets starved, throughput plummets) -topology.transfer.buffer.size: 1024 # batched +topology.worker.logwriter.childopts: "-Xmx64m" topology.tick.tuple.freq.secs: null topology.worker.shared.thread.pool.size: 4 -topology.disruptor.wait.strategy: "com.lmax.disruptor.BlockingWaitStrategy" -topology.spout.wait.strategy: "backtype.storm.spout.SleepSpoutWaitStrategy" -topology.sleep.spout.wait.strategy.time.ms: 1 + +# Spout Wait Strategy - employed when there is no data to produce +topology.spout.wait.strategy: "org.apache.storm.policy.WaitStrategyProgressive" +topology.spout.wait.park.microsec : 100 # park time for org.apache.storm.policy.WaitStrategyPark. Busy spins if set to 0. + +topology.spout.wait.progressive.level1.count: 0 # number of iterations to spend in level 1 [no sleep] of WaitStrategyProgressive, before progressing to level 2 +topology.spout.wait.progressive.level2.count: 0 # number of iterations to spend in level 2 [parkNanos(1)] of WaitStrategyProgressive, before progressing to level 3 +topology.spout.wait.progressive.level3.sleep.millis: 1 # sleep duration for idling iterations in level 3 of WaitStrategyProgressive + +# Bolt Wait Strategy - employed when there is no data in its receive buffer to process +topology.bolt.wait.strategy : "org.apache.storm.policy.WaitStrategyProgressive" + +topology.bolt.wait.park.microsec : 100 # park time for org.apache.storm.policy.WaitStrategyPark. Busy spins if set to 0. + +topology.bolt.wait.progressive.level1.count: 1 # number of iterations to spend in level 1 [no sleep] of WaitStrategyProgressive, before progressing to level 2 +topology.bolt.wait.progressive.level2.count: 1000 # number of iterations to spend in level 2 [parkNanos(1)] of WaitStrategyProgressive, before progressing to level 3 +topology.bolt.wait.progressive.level3.sleep.millis: 1 # sleep duration for idling iterations in level 3 of WaitStrategyProgressive + +# BackPressure Wait Strategy - for any producer (spout/bolt/transfer thread) when the downstream Q is full +topology.backpressure.wait.strategy: "org.apache.storm.policy.WaitStrategyProgressive" + +topology.backpressure.wait.park.microsec: 100 # park time for org.apache.storm.policy.WaitStrategyPark. Busy spins if set to 0. + +topology.backpressure.wait.progressive.level1.count: 1 # number of iterations to spend in level 1 [no sleep] of WaitStrategyProgressive, before progressing to level 2 +topology.backpressure.wait.progressive.level2.count: 1000 # number of iterations to spend in level 2 [parkNanos(1)] of WaitStrategyProgressive, before progressing to level 3 +topology.backpressure.wait.progressive.level3.sleep.millis: 1 # sleep duration for idling iterations in level 3 of WaitStrategyProgressive + + +topology.backpressure.check.millis: 50 # how often to check if backpressure has relieved on executors under BP, for informing other workers to resume sending msgs to them. Must be > 0 +topology.executor.overflow.limit: 0 # max items in overflowQ of any bolt/spout. 
When exceeded, the worker will drop incoming messages (from the workers) destined to that overflowing spout/bolt. Set to 0 to disable overflow limiting. Enabling this may degrade perf slightly.
+
 topology.error.throttle.interval.secs: 10
 topology.max.error.report.per.interval: 5
-topology.kryo.factory: "backtype.storm.serialization.DefaultKryoFactory"
-topology.tuple.serializer: "backtype.storm.serialization.types.ListDelegateSerializer"
+topology.kryo.factory: "org.apache.storm.serialization.DefaultKryoFactory"
+topology.tuple.serializer: "org.apache.storm.serialization.types.ListDelegateSerializer"
 topology.trident.batch.emit.interval.millis: 500
+topology.testing.always.try.serialize: false
+topology.classpath: null
+topology.environment: null
+
+topology.transfer.buffer.size: 1000 # size of recv queue for transfer worker thread
+topology.transfer.batch.size: 1 # can be no larger than half of `topology.transfer.buffer.size`
+
+topology.executor.receive.buffer.size: 32768 # size of recv queue for spouts & bolts. Will be internally rounded up to next power of 2 (if not already a power of 2)
+topology.producer.batch.size: 1 # can be no larger than half of `topology.executor.receive.buffer.size`
+
+topology.batch.flush.interval.millis: 1 # Flush tuples are disabled if this is set to 0 or if (topology.producer.batch.size=1 and topology.transfer.batch.size=1).
+topology.spout.recvq.skips: 3 # Check recvQ once every N invocations of Spout's nextTuple() [when ACKs disabled]
+
+topology.disable.loadaware.messaging: false
+topology.state.checkpoint.interval.ms: 1000
+topology.localityaware.higher.bound: 0.8
+topology.localityaware.lower.bound: 0.2
+topology.serialized.message.size.metrics: false
+
+# Configs for Resource Aware Scheduler
+# topology.priority describes the importance of a topology, starting from 0: 0 is the highest priority,
+# and importance decreases as the priority number increases (a small ordering sketch follows below).
+# Recommended range of 0-29 but no hard limit set.
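To make the convention concrete, here is a tiny illustrative Python snippet (the topology names are invented, not from Storm) showing how a priority-aware scheduler would order topologies when lower numbers mean higher priority:

```python
# Illustrative only: lower topology.priority values are considered first.
topologies = [
    {"name": "etl", "topology.priority": 29},
    {"name": "fraud-detection", "topology.priority": 0},
    {"name": "reporting", "topology.priority": 10},
]
for t in sorted(topologies, key=lambda t: t["topology.priority"]):
    print(t["name"], t["topology.priority"])
# -> fraud-detection 0, reporting 10, etl 29
```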
+topology.priority: 29
+topology.component.resources.onheap.memory.mb: 128.0
+topology.component.resources.offheap.memory.mb: 0.0
+topology.component.cpu.pcore.percent: 10.0
+topology.worker.max.heap.size.mb: 768.0
+topology.scheduler.strategy: "org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy"
+resource.aware.scheduler.priority.strategy: "org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy"
+topology.ras.constraint.max.state.search: 10_000 # The maximum number of states that will be searched looking for a solution in the constraint solver strategy
+resource.aware.scheduler.constraint.max.state.search: 100_000 # Daemon limit on maximum number of states that will be searched looking for a solution in the constraint solver strategy
+topology.ras.one.executor.per.worker: false
+topology.ras.one.component.per.worker: false
+
+blacklist.scheduler.tolerance.time.secs: 300
+blacklist.scheduler.tolerance.count: 3
+blacklist.scheduler.resume.time.secs: 1800
+blacklist.scheduler.reporter: "org.apache.storm.scheduler.blacklist.reporters.LogReporter"
+blacklist.scheduler.strategy: "org.apache.storm.scheduler.blacklist.strategies.DefaultBlacklistStrategy"
+blacklist.scheduler.assume.supervisor.bad.based.on.bad.slot: true
 
 dev.zookeeper.path: "/tmp/dev-storm-zookeeper"
+
+pacemaker.servers: []
+pacemaker.port: 6699
+pacemaker.base.threads: 10
+pacemaker.max.threads: 50
+pacemaker.client.max.threads: 2
+pacemaker.thread.timeout: 10
+pacemaker.childopts: "-Xmx1024m"
+pacemaker.auth.method: "NONE"
+pacemaker.kerberos.users: []
+pacemaker.thrift.message.size.max: 10485760
+
+#default storm daemon metrics reporter plugins
+storm.daemon.metrics.reporter.plugins:
+    - "org.apache.storm.daemon.metrics.reporters.JmxPreparableReporter"
+storm.daemon.metrics.reporter.interval.secs: 10
+
+storm.metricstore.class: "org.apache.storm.metricstore.rocksdb.RocksDbStore"
+storm.metricprocessor.class: "org.apache.storm.metricstore.NimbusMetricProcessor"
+storm.metricstore.rocksdb.location: "storm_rocks"
+storm.metricstore.rocksdb.create_if_missing: true
+storm.metricstore.rocksdb.metadata_string_cache_capacity: 4000
+storm.metricstore.rocksdb.retention_hours: 240
+
+# configuration of cluster metrics consumer
+storm.cluster.metrics.consumer.publish.interval.secs: 60
+
+storm.resource.isolation.plugin: "org.apache.storm.container.cgroup.CgroupManager"
+# Also determines whether the unit tests for cgroups run.
+# If storm.resource.isolation.plugin.enable is set to false, the unit tests for cgroups will not run
+storm.resource.isolation.plugin.enable: false
+storm.cgroup.memory.enforcement.enable: false
+storm.cgroup.inherit.cpuset.configs: false
+
+# Configs for CGroup support
+storm.cgroup.hierarchy.dir: "/cgroup/storm_resources"
+storm.cgroup.resources:
+    - "cpu"
+    - "memory"
+storm.cgroup.hierarchy.name: "storm"
+storm.supervisor.cgroup.rootdir: "storm"
+storm.cgroup.cgexec.cmd: "/bin/cgexec"
+storm.cgroup.memory.limit.tolerance.margin.mb: 0.0
+storm.supervisor.memory.limit.tolerance.margin.mb: 128.0
+storm.supervisor.hard.memory.limit.multiplier: 2.0
+storm.supervisor.hard.memory.limit.overage.mb: 2024
+storm.supervisor.low.memory.threshold.mb: 1024
+storm.supervisor.medium.memory.threshold.mb: 1536
+storm.supervisor.medium.memory.grace.period.ms: 30000
+
+storm.oci.cgroup.root: "/sys/fs/cgroup"
+storm.oci.cgroup.parent: "/storm"
+storm.oci.nscd.dir: "/var/run/nscd"
+storm.worker.min.cpu.pcore.percent: 0.0
+
+storm.topology.classpath.beginning.enabled: false
+worker.metrics:
+    "CGroupMemory": "org.apache.storm.metrics2.cgroup.CGroupMemoryUsage"
+    "CGroupMemoryLimit": "org.apache.storm.metrics2.cgroup.CGroupMemoryLimit"
+    "CGroupCpu": "org.apache.storm.metrics2.cgroup.CGroupCpu"
+    "CGroupCpuGuarantee": "org.apache.storm.metrics2.cgroup.CGroupCpuGuarantee"
+    "CGroupCpuGuaranteeByCfsQuota": "org.apache.storm.metrics2.cgroup.CGroupCpuGuaranteeByCfsQuota"
+    "CGroupCpuStat": "org.apache.storm.metrics2.cgroup.CGroupCpuStat"
+
+# The number of buckets for running statistics
+num.stat.buckets: 20
diff --git a/conf/drpc-auth-acl.yaml.example b/conf/drpc-auth-acl.yaml.example
new file mode 100644
index 00000000000..1467518e4ea
--- /dev/null
+++ b/conf/drpc-auth-acl.yaml.example
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# For the function "functionName1", alice can perform client operations, and bob can
+# perform invocation operations. Users should replace "functionName1", "functionName2", "alice", and "bob" with their own values.
+drpc.authorizer.acl:
+  "functionName1":
+    "client.users":
+      - "alice"
+      - "bob"
+    "invocation.user": "bob"
+  "functionName2":
+    "client.users":
+      - "alice"
diff --git a/conf/jaas_digest.conf b/conf/jaas_digest.conf
index 06dd7aa1ac5..301f3e0c73a 100644
--- a/conf/jaas_digest.conf
+++ b/conf/jaas_digest.conf
@@ -17,21 +17,17 @@
 */
 
/* This is an example of JAAS Login configuration for digest authentication
-*/
-/* StormServer section should contain a list of authorized users and their passwords.
+StormClient section contains one user name and his/her password.
*/
 StormServer {
       org.apache.zookeeper.server.auth.DigestLoginModule required
       user_super="adminsecret"
-      user_bob="bobsecret";
+      user_bob="bobsecret"
+      user_john="johnsecret";
 };
 
 StormClient {
       org.apache.zookeeper.server.auth.DigestLoginModule required
       username="bob"
diff --git a/conf/log4j2.xml b/conf/log4j2.xml
new file mode 100644
index 00000000000..8fcbf65de52
--- /dev/null
+++ b/conf/log4j2.xml
@@ -0,0 +1,32 @@
[XML content of the new log4j2.xml was stripped in extraction and is not recoverable]
diff --git a/conf/logback.xml b/conf/logback.xml
deleted file mode 100644
index 7ccaae6a44b..00000000000
--- a/conf/logback.xml
+++ /dev/null
@@ -1,28 +0,0 @@
-    %-4r [%t] %-5p %c - %m%n
[remaining XML content of the deleted logback.xml was stripped in extraction and is not recoverable]
diff --git a/conf/seccomp.json.example b/conf/seccomp.json.example
new file mode 100644
index 00000000000..3c1f550e1e0
--- /dev/null
+++ b/conf/seccomp.json.example
@@ -0,0 +1,407 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# JSON format doesn't support comments. Please remove this line and the lines above before using this file.
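Since the comment block must be removed before the profile can be consumed, a small helper like the following could automate that step. This is a sketch, not a tool shipped with Storm; the file path and function name are assumptions:

```python
import json

# Hypothetical helper: strip the leading '#' comment lines from
# seccomp.json.example (as the note above requires) and verify the
# remainder parses as JSON.
def load_seccomp_example(path="conf/seccomp.json.example"):
    with open(path) as f:
        lines = [ln for ln in f if not ln.lstrip().startswith("#")]
    profile = json.loads("".join(lines))
    # defaultAction SCMP_ACT_ERRNO means: deny every syscall not explicitly allowed.
    print(profile["defaultAction"], len(profile["syscalls"]), "syscall rules")
    return profile
```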
+ +{ + "defaultAction": "SCMP_ACT_ERRNO", + "syscalls": [ + { + "names": [ + "accept", + "accept4", + "access", + "alarm", + "alarm", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_getres", + "clock_gettime", + "clock_nanosleep", + "close", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsetxattr", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + "fstatfs64", + "fsync", + "ftruncate", + "ftruncate64", + "futex", + "futimesat", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "get_robust_list", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "get_thread_area", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "ioctl", + "io_destroy", + "io_getevents", + "ioprio_get", + "ioprio_set", + "io_setup", + "io_submit", + "ipc", + "kill", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "_llseek", + "lremovexattr", + "lseek", + "lsetxattr", + "lstat", + "lstat64", + "madvise", + "mbind", + "memfd_create", + "mincore", + "mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedsend", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + "munlockall", + "munmap", + "nanosleep", + "newfstatat", + "_newselect", + "open", + "openat", + "pause", + "pipe", + "pipe2", + "poll", + "ppoll", + "prctl", + "pread64", + "preadv", + "prlimit64", + "pselect6", + "pwrite64", + "pwritev", + "read", + "readahead", + "readlink", + "readlinkat", + "readv", + "recv", + "recvfrom", + "recvmmsg", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + "setpriority", + "setregid", + "setregid32", + 
"setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "set_robust_list", + "setsid", + "setsockopt", + "set_thread_area", + "set_tid_address", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigreturn", + "socket", + "socketcall", + "socketpair", + "splice", + "stat", + "stat64", + "statfs", + "statfs64", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "syslog", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timerfd_create", + "timerfd_gettime", + "timerfd_settime", + "timer_getoverrun", + "timer_gettime", + "timer_settime", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "uname", + "unlink", + "unlinkat", + "utime", + "utimensat", + "utimes", + "vfork", + "vmsplice", + "wait4", + "waitid", + "waitpid", + "write", + "writev", + "mount", + "umount2", + "reboot", + "name_to_handle_at", + "unshare" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 0, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 8, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 4294967295, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "arch_prctl" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "modify_ldt" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "clone" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ] + } + ] +} \ No newline at end of file diff --git a/conf/storm-cluster-auth.yaml.example b/conf/storm-cluster-auth.yaml.example new file mode 100644 index 00000000000..9ed6a19e39e --- /dev/null +++ b/conf/storm-cluster-auth.yaml.example @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +########### Users should replace ${user} and ${pwd} with their own username and password +#storm.zookeeper.auth.payload:${user}:${pwd} \ No newline at end of file diff --git a/conf/storm-env.ps1 b/conf/storm-env.ps1 new file mode 100644 index 00000000000..326c91cd4c8 --- /dev/null +++ b/conf/storm-env.ps1 @@ -0,0 +1,23 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Set Storm specific environment variables here.
+
+# The java implementation to use.
+$env:JAVA_HOME = $env:JAVA_HOME;
+
+#$env:STORM_CONF_DIR = ""
\ No newline at end of file
diff --git a/conf/storm-env.sh b/conf/storm-env.sh
new file mode 100644
index 00000000000..5a9eb06da5f
--- /dev/null
+++ b/conf/storm-env.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Storm specific environment variables here.
+
+# The java implementation to use.
+export JAVA_HOME=${JAVA_HOME}
+
+# export STORM_CONF_DIR=""
diff --git a/conf/storm.yaml.example b/conf/storm.yaml.example
index 5fd35f89f26..2711ef09a25 100644
--- a/conf/storm.yaml.example
+++ b/conf/storm.yaml.example
@@ -19,7 +19,7 @@
 #     - "server1"
 #     - "server2"
 #
-# nimbus.host: "nimbus"
+# nimbus.seeds: ["host1", "host2", "host3"]
 #
 #
 # ##### These may optionally be filled in:
@@ -39,10 +39,63 @@
 #    - "server2"
 
 ## Metrics Consumers
+## max.retain.metric.tuples
+## - the task queue will be unbounded when max.retain.metric.tuples is less than or equal to 0.
+## whitelist / blacklist
+## - when no metric filter configuration is specified, it is treated as 'pass all'.
+## - you need to specify either whitelist or blacklist, or neither of them. You can't specify both of them.
+## - you can specify multiple whitelist / blacklist entries using regular expressions
+##   (a standalone sketch of these semantics follows below; the registration example continues after it)
+## expandMapType: expand a metric whose value is a map into multiple metrics
+## - set to true when you would like to apply the filter to the expanded metrics
+## - the default value is false, which is the backward-compatible value
+## metricNameSeparator: separator between the original metric name and the key of each map entry
+## - only effective when expandMapType is set to true
+## - default value is "."
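A rough sketch of the whitelist / blacklist semantics described above, written as standalone Python rather than Storm's actual filter implementation: configure one list or the other (never both), and with neither configured every metric passes.

```python
import re

# Illustrative only: mirrors the documented semantics, not Storm's code.
def metric_passes(name, whitelist=None, blacklist=None):
    if whitelist and blacklist:
        raise ValueError("specify either whitelist or blacklist, not both")
    if whitelist:
        return any(re.search(p, name) for p in whitelist)
    if blacklist:
        return not any(re.search(p, name) for p in blacklist)
    return True  # no filter configured -> 'pass all'


assert metric_passes("execute.latency", whitelist=["execute.*", "^__complete-latency$"])
assert not metric_passes("__emit-count", whitelist=["execute.*"])
assert metric_passes("anything")  # pass-all default
```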
# topology.metrics.consumer.register: -# - class: "backtype.storm.metric.LoggingMetricsConsumer" +# - class: "org.apache.storm.metric.LoggingMetricsConsumer" +# max.retain.metric.tuples: 100 # parallelism.hint: 1 # - class: "org.mycompany.MyMetricsConsumer" +# max.retain.metric.tuples: 100 +# whitelist: +# - "execute.*" +# - "^__complete-latency$" # parallelism.hint: 1 # argument: # - endpoint: "metrics-collector.mycompany.org" +# expandMapType: true +# metricNameSeparator: "." + +## Cluster Metrics Consumers +# storm.cluster.metrics.consumer.register: +# - class: "org.apache.storm.metric.LoggingClusterMetricsConsumer" +# - class: "org.mycompany.MyMetricsConsumer" +# argument: +# - endpoint: "metrics-collector.mycompany.org" +# +# storm.cluster.metrics.consumer.publish.interval.secs: 60 + +# Event Logger +# topology.event.logger.register: +# - class: "org.apache.storm.metric.FileBasedEventLogger" +# - class: "org.mycompany.MyEventLogger" +# arguments: +# endpoint: "event-logger.mycompany.org" + +# Topology metrics v2 configuration (optional) +#topology.metrics.reporters: +# # Graphite Reporter +# - class: "org.apache.storm.metrics2.reporters.GraphiteStormReporter" +# report.period: 60 +# report.period.units: "SECONDS" +# graphite.host: "localhost" +# graphite.port: 2003 +# +# # Console Reporter +# - class: "org.apache.storm.metrics2.reporters.ConsoleStormReporter" +# report.period: 10 +# report.period.units: "SECONDS" +# filter: +# class: "org.apache.storm.metrics2.filters.RegexFilter" +# expression: ".*my_component.*emitted.*" + diff --git a/conf/storm_env.ini b/conf/storm_env.ini index d7af930e247..5fc9356d71e 100644 --- a/conf/storm_env.ini +++ b/conf/storm_env.ini @@ -18,7 +18,7 @@ # Environment variables in the following section will be used -# in storm pytyon script. It override the environment variables +# in storm python script. They override the environment variables # set in the shell. [environment] diff --git a/conf/storm_jaas.conf b/conf/storm_jaas.conf new file mode 100644 index 00000000000..765ca158dca --- /dev/null +++ b/conf/storm_jaas.conf @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +StormServer { + com.sun.security.auth.module.Krb5LoginModule required + useKeyTab=true + keyTab="$keytab" + storeKey=true + useTicketCache=false + principal="$principal"; +}; +StormClient { + com.sun.security.auth.module.Krb5LoginModule required + useKeyTab=true + keyTab="$keytab" + storeKey=true + useTicketCache=false + serviceName="$nimbus_user" + principal="$principal"; +}; +Client { + com.sun.security.auth.module.Krb5LoginModule required + useKeyTab=true + keyTab="$keytab" + storeKey=true + useTicketCache=false + serviceName="zookeeper" + principal="$principal"; +}; + + diff --git a/conf/user-resource-pools-example.yaml b/conf/user-resource-pools-example.yaml new file mode 100644 index 00000000000..829a6be9df4 --- /dev/null +++ b/conf/user-resource-pools-example.yaml @@ -0,0 +1,26 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +resource.aware.scheduler.user.pools: + jerry: + cpu: 1000 + memory: 8192.0 + derek: + cpu: 10000.0 + memory: 32768 + bobby: + cpu: 5000.0 + memory: 16384.0 \ No newline at end of file diff --git a/conf/zookeeper_jaas.conf b/conf/zookeeper_jaas.conf new file mode 100644 index 00000000000..c38d55e26b7 --- /dev/null +++ b/conf/zookeeper_jaas.conf @@ -0,0 +1,35 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/
+Server {
+       com.sun.security.auth.module.Krb5LoginModule required
+       useKeyTab=true
+       keyTab="$keytab"
+       storeKey=true
+       useTicketCache=false
+       principal="$principal";
+};
+Client {
+       com.sun.security.auth.module.Krb5LoginModule required
+       useKeyTab=true
+       keyTab="$keytab"
+       storeKey=true
+       useTicketCache=false
+       serviceName="zookeeper"
+       principal="$principal";
+};
+
diff --git a/dev-tools/checkstyle.xslt b/dev-tools/checkstyle.xslt
new file mode 100644
index 00000000000..54ff18f8e6d
--- /dev/null
+++ b/dev-tools/checkstyle.xslt
@@ -0,0 +1,40 @@
[XSLT content of this new file was stripped in extraction and is not recoverable; only a literal ":" survived]
diff --git a/dev-tools/collect_license_files.sh b/dev-tools/collect_license_files.sh
new file mode 100644
index 00000000000..c09cc424260
--- /dev/null
+++ b/dev-tools/collect_license_files.sh
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# This script extracts from all jars in the specified directory the NOTICE files and the
+# licenses folders. It then concatenates all NOTICE files and collects the contents of all
+# licenses folders in the specified output directory.
+#
+# This tool can be used to generate a rough skeleton for the binary NOTICE file. Be aware
+# that it does not deduplicate contents.
+
+set -Eeuo pipefail
+
+SRC=${1:-.}
+DST=${2:-licenses-output}
+PWD=$(pwd)
+TMP="${DST}/tmp"
+
+USAGE="collect_license_files <source directory> <output directory>"
+
+if [ "${SRC}" = "-h" ]; then
+  echo "${USAGE}"
+  exit 0
+fi
+
+for i in $(find -L "${SRC}" -name "*.jar")
+do
+  DIR="${TMP}/$(basename -- "$i" .jar)"
+  mkdir -p "${DIR}"
+  JAR="${PWD}/${i}"
+  (cd "${DIR}" && jar xf "${JAR}" META-INF/NOTICE META-INF/licenses)
+done
+
+NOTICE="${DST}/NOTICE"
+[ -f "${NOTICE}" ] && rm "${NOTICE}"
+find "${TMP}" -name "NOTICE" | sort | xargs -I fname sh -c "(cat fname; echo '\n========================\n') >> ${NOTICE}"
+
+LICENSES="${DST}/licenses"
+[ -d "${LICENSES}" ] && rm -r "${LICENSES}"
+find "${TMP}" -name "licenses" -type d -exec cp -r -- "{}" "${DST}" \;
+
+rm -r "${TMP}"
diff --git a/dev-tools/docker/Dockerfile b/dev-tools/docker/Dockerfile
new file mode 100644
index 00000000000..0a61ac33cb6
--- /dev/null
+++ b/dev-tools/docker/Dockerfile
@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+FROM ubuntu:latest
+
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    curl \
+    wget \
+    git \
+    bash \
+    unzip \
+    ca-certificates \
+    autoconf \
+    automake \
+    libssl-dev libtool pkg-config \
+    ruby ruby-dev \
+    python3.10 pip \
+    openjdk-17-jdk \
+    build-essential && \
+    rm -rf /var/lib/apt/lists/*
+
+# Install Node.js 20.x
+RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
+    apt-get install -y nodejs && \
+    npm install -g npm@latest
+
+# Set up Maven
+ARG MAVEN_VERSION=3.9.9
+RUN wget https://archive.apache.org/dist/maven/maven-3/${MAVEN_VERSION}/binaries/apache-maven-${MAVEN_VERSION}-bin.zip && \
+    unzip apache-maven-${MAVEN_VERSION}-bin.zip -d /opt && \
+    ln -s /opt/apache-maven-${MAVEN_VERSION}/bin/mvn /usr/bin/mvn && \
+    rm apache-maven-${MAVEN_VERSION}-bin.zip
+
+# Set environment variables for Maven and Java
+ENV MAVEN_HOME=/opt/apache-maven-${MAVEN_VERSION}
+ENV PATH="${MAVEN_HOME}/bin:${PATH}"
+
+USER ubuntu
\ No newline at end of file
diff --git a/dev-tools/docker/README.md b/dev-tools/docker/README.md
new file mode 100644
index 00000000000..0405fa9b4a4
--- /dev/null
+++ b/dev-tools/docker/README.md
@@ -0,0 +1,30 @@
+# Apache Storm Development Dockerfile
+
+This Dockerfile provides a complete development environment for Apache Storm, aligning with the GitHub Actions CI setup
+for building and testing various modules of Apache Storm.
+
+It installs and configures Java, Maven, Python, Node.js, and Ruby, allowing you to run builds and tests for different Storm modules in a containerized environment.
+
+This is especially useful for people on macOS or Windows. It also provides a consistent environment for all developers.
+
+## Usage
+
+Build it by running:
+
+```bash
+docker build -t storm-dev .
+```
+
+## Run a build
+
+```bash
+docker run -it \
+--name storm-dev \
+-e MAVEN_OPTS="-Xmx768m -XX:ReservedCodeCacheSize=64m -Xss2048k" \
+-v $(pwd)/m2:/home/ubuntu/.m2 \
+-v $(pwd):/opt/project \
+-w /opt/project \
+storm-dev
+```
+
+Advanced cases such as remote debugging are also possible. Just map the debugger port and start Maven accordingly.
\ No newline at end of file
diff --git a/dev-tools/find-checkstyle-issues.py b/dev-tools/find-checkstyle-issues.py
new file mode 100755
index 00000000000..9a6d5a14b4b
--- /dev/null
+++ b/dev-tools/find-checkstyle-issues.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from optparse import OptionParser
+import subprocess
+
+
+def getCheckstyleFor(f, check_result):
+    f = os.path.abspath(f)
+    check_result = os.path.abspath(check_result)
+    # Decode the subprocess output so issues print as text, not a bytes literal.
+    ret = subprocess.check_output(['xsltproc', '--stringparam', 'target', f, './dev-tools/checkstyle.xslt', check_result]).decode()
+    if not ret.isspace():
+        print(ret)
+
+
+def main():
+    parser = OptionParser(usage="usage: %prog [options]")
+    parser.add_option("-c", "--checkstyle-result", dest="check_result",
+                      type="string", help="the checkstyle-result.xml file to parse", metavar="FILE")
+
+    (options, args) = parser.parse_args()
+
+    for f in args:
+        getCheckstyleFor(f, options.check_result)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/dev-tools/gitact/gitact-install.sh b/dev-tools/gitact/gitact-install.sh
new file mode 100755
index 00000000000..4b933b457f4
--- /dev/null
+++ b/dev-tools/gitact/gitact-install.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# shellcheck disable=SC2006
+echo "Python version : $(python -V 2>&1) (note python2 is not supported) "
+echo "Python3 version : $(python3 -V 2>&1) "
+echo "Pip3 version : $(pip3 --version 2>&1) "
+
+
+echo "Maven version : $(mvn -v)"
+
+STORM_SRC_ROOT_DIR=$1
+
+TRAVIS_SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+
+pip3 install --user -r "${TRAVIS_SCRIPT_DIR}"/requirements.txt
+
+python3 "${TRAVIS_SCRIPT_DIR}"/save-logs.py "storm-shaded-deps/install-shade.txt" mvn clean install --batch-mode -pl storm-shaded-deps -am
+BUILD_RET_VAL=$?
+if [[ "$BUILD_RET_VAL" != "0" ]];
+then
+  cat "storm-shaded-deps/install-shade.txt"
+  exit ${BUILD_RET_VAL}
+fi
+
+cd "${STORM_SRC_ROOT_DIR}" || ( echo "Cannot cd to ${STORM_SRC_ROOT_DIR}"; exit 1 )
+
+# Check the operating system
+OS="$(uname)"
+echo "$OS"
+# Run the command only if the OS is not macOS
+if [ "$OS" != "Darwin" ]; then
+  python3 "${TRAVIS_SCRIPT_DIR}"/save-logs.py "install.txt" mvn clean install -DskipTests -Pnative,examples,externals -pl '!storm-shaded-deps' --batch-mode
+else
+  echo "Running on macOS. Skipping -Pnative."
+  python3 "${TRAVIS_SCRIPT_DIR}"/save-logs.py "install.txt" mvn clean install -DskipTests -Pexamples,externals -pl '!storm-shaded-deps' --batch-mode
+fi
+BUILD_RET_VAL=$?
+
+if [[ "$BUILD_RET_VAL" != "0" ]];
+then
+  cat "install.txt"
+  echo "Looking for unapproved licenses"
+  for rat in $(find . -name rat.txt)
+  do
+    python3 "${TRAVIS_SCRIPT_DIR}"/ratprint.py "${rat}"
+  done
+fi
+
+
+exit ${BUILD_RET_VAL}
diff --git a/dev-tools/gitact/gitact-script.sh b/dev-tools/gitact/gitact-script.sh
new file mode 100755
index 00000000000..42094383605
--- /dev/null
+++ b/dev-tools/gitact/gitact-script.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+echo "Python3 version: " $(python3 -V 2>&1)
+echo "Ruby version : " $(ruby -v)
+echo "NodeJs version : " $(node -v)
+echo "Maven version : " $(mvn -v)
+
+set -x
+
+STORM_SRC_ROOT_DIR=$1
+
+THIS_SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+
+cd "${STORM_SRC_ROOT_DIR}" || (echo "Cannot cd to ${STORM_SRC_ROOT_DIR}"; exit 1)
+
+if [ "$2" == "Integration-Test" ]
+then
+    exec ./integration-test/run-it.sh
+elif [ "$2" == "Check-Updated-License-Files" ]
+then
+    exec python3 dev-tools/validate-license-files.py --skip-build-storm
+elif [ "$2" == "Client" ]
+then
+    TEST_MODULES=storm-client
+elif [ "$2" == "Server" ]
+then
+    TEST_MODULES=storm-server,storm-webapp
+elif [ "$2" == "Core" ]
+then
+    TEST_MODULES=storm-core
+elif [ "$2" == "External" ]
+then
+    TEST_MODULES='!storm-client,!storm-server,!storm-core,!storm-webapp,!storm-shaded-deps'
+fi
+# CI runners can be very slow because they use VMs, so allow a generous test timeout
+export STORM_TEST_TIMEOUT_MS=150000
+# GitHub Actions runners only have 7GB of memory; let's cap the build JVM at 2GB, with enough stack to run tests
+export MAVEN_OPTS="-Xmx2048m"
+
+mvn --batch-mode test -fae -Pnative,all-tests,examples,externals -Prat -pl "$TEST_MODULES"
+BUILD_RET_VAL=$?
+
+for dir in $(find . -type d -and -wholename \*/target/\*-reports)
+do
+  echo "Looking for errors in ${dir}"
+  python3 "${THIS_SCRIPT_DIR}"/print-errors-from-test-reports.py "${dir}"
+done
+
+exit ${BUILD_RET_VAL}
diff --git a/dev-tools/gitact/print-errors-from-test-reports.py b/dev-tools/gitact/print-errors-from-test-reports.py
new file mode 100644
index 00000000000..886b9ab557e
--- /dev/null
+++ b/dev-tools/gitact/print-errors-from-test-reports.py
@@ -0,0 +1,81 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import glob
+import traceback
+from xml.etree.ElementTree import ElementTree
+
+
+def print_detail_information(testcase, fail_or_error):
+    print("-" * 50)
+    print("classname: %s / testname: %s" % (testcase.get("classname"), testcase.get("name")))
+    print(fail_or_error.text)
+    stdout = testcase.find("system-out")
+    if stdout is not None:
+        print("-" * 20, "system-out", "-" * 20)
+        print(stdout.text)
+    stderr = testcase.find("system-err")
+    if stderr is not None:
+        print("-" * 20, "system-err", "-" * 20)
+        print(stderr.text)
+    print("-" * 50)
+
+
+def print_error_reports_from_report_file(file_path):
+    tree = ElementTree()
+    try:
+        tree.parse(file_path)
+    except Exception:
+        print("-" * 50)
+        print("Error parsing %s" % file_path)
+        with open(file_path, "r") as f:
+            print(f.read())
+        print("-" * 50)
+        return
+
+    testcases = tree.findall(".//testcase")
+    for testcase in testcases:
+        error = testcase.find("error")
+        if error is not None:
+            print_detail_information(testcase, error)
+
+        fail = testcase.find("fail")
+        if fail is not None:
+            print_detail_information(testcase, fail)
+
+        failure = testcase.find("failure")
+        if failure is not None:
+            print_detail_information(testcase, failure)
+
+
+def main(report_dir_path):
+    for test_report in glob.iglob(report_dir_path + '/*.xml'):
+        file_path = os.path.abspath(test_report)
+        try:
+            print("Checking %s" % test_report)
+            print_error_reports_from_report_file(file_path)
+        except Exception as e:
+            print("Error while reading report file, %s" % file_path)
+            print("Exception: %s" % e)
+            traceback.print_exc()
+
+
+if __name__ == "__main__":
+    if len(sys.argv) < 2:
+        print("Usage: %s [report dir path]" % sys.argv[0])
+        sys.exit(1)
+
+    main(sys.argv[1])
diff --git a/dev-tools/gitact/ratprint.py b/dev-tools/gitact/ratprint.py
new file mode 100755
index 00000000000..70e0d9560c6
--- /dev/null
+++ b/dev-tools/gitact/ratprint.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import sys
+import re
+
+p = re.compile(r'Unapproved licenses:\s*([^\s\*]*).*\*\*\*')
+
+with open(sys.argv[1]) as ratfile:
+    rat = ratfile.read().replace('\n', '')
+
+# Guard against rat.txt files that have no "Unapproved licenses" section.
+matches = p.search(rat)
+if matches:
+    failed = matches.group(1)
+    if re.search(r'\S', failed):
+        print(failed)
diff --git a/dev-tools/gitact/requirements.txt b/dev-tools/gitact/requirements.txt
new file mode 100644
index 00000000000..b3ff2deab2d
--- /dev/null
+++ b/dev-tools/gitact/requirements.txt
@@ -0,0 +1,13 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+mock == 2.0.0
\ No newline at end of file
diff --git a/dev-tools/gitact/save-logs.py b/dev-tools/gitact/save-logs.py
new file mode 100755
index 00000000000..a3a52152f33
--- /dev/null
+++ b/dev-tools/gitact/save-logs.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import subprocess
+from datetime import datetime, timedelta
+
+
+def main(file, cmd):
+    print(cmd, "writing to", file)
+    out = open(file, "w")
+    count = 0
+    process = subprocess.Popen(cmd,
+                               stderr=subprocess.STDOUT,
+                               stdout=subprocess.PIPE)
+
+    start = datetime.now()
+    nextPrint = datetime.now() + timedelta(seconds=1)
+    # wait for the process to terminate
+    pout = process.stdout
+    line = pout.readline()
+    while line:
+        line = line.decode('utf-8')
+        count = count + 1
+        if datetime.now() > nextPrint:
+            diff = datetime.now() - start
+            sys.stdout.write(f"\r{diff.seconds} seconds {count} log lines")
+            sys.stdout.flush()
+            nextPrint = datetime.now() + timedelta(seconds=10)
+        out.write(line)
+        line = pout.readline()
+    out.close()
+    errcode = process.wait()
+    diff = datetime.now() - start
+    sys.stdout.write(f"\r{diff.seconds} seconds {count} log lines")
+    print()
+    print(cmd, "done", errcode)
+    return errcode
+
+
+if __name__ == "__main__":
+    if len(sys.argv) < 3:
+        print(f"Usage: {sys.argv[0]} <output file> <cmd...>")
+        sys.exit(1)
+
+    sys.exit(main(sys.argv[1], sys.argv[2:]))
diff --git a/dev-tools/github/__init__.py b/dev-tools/github/__init__.py
new file mode 100644
index 00000000000..4cc659dd1cd
--- /dev/null
+++ b/dev-tools/github/__init__.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import getpass
+import base64
+import urllib
+import urllib.request
+from datetime import datetime
+import re
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+
+
+def git_time(obj):
+    if obj is None:
+        return None
+    return datetime.strptime(obj[0:19], "%Y-%m-%dT%H:%M:%S")
+
+
+class GitPullRequest:
+    """Pull Request from Git"""
+
+    storm_jira_number = re.compile("STORM-[0-9]+", re.I)
+
+    def __init__(self, data, parent):
+        self.data = data
+        self.parent = parent
+
+    def html_url(http://github.com/self):
+        return self.data["html_url"]
+
+    def title(self):
+        return self.data["title"]
+
+    def trimmed_title(self):
+        limit = 40
+        title = self.data["title"]
+        return title if len(title) < limit else title[0:limit] + "..."
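+
+    # The accessors below are thin wrappers over fields of the GitHub
+    # pull-request JSON payload kept in self.data.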
+
+    def number(self):
+        return self.data["number"]
+
+    # TODO def review_comments
+
+    def user(self):
+        return self.data["user"]["login"]
+
+    def from_branch(self):
+        return self.data["head"]["ref"]
+
+    def from_repo(self):
+        return self.data["head"]["repo"]["clone_url"]
+
+    def merged(self):
+        return self.data["merged_at"] is not None
+
+    def raw(self):
+        return self.data
+
+    def created_at(self):
+        return git_time(self.data["created_at"])
+
+    def updated_at(self):
+        return git_time(self.data["updated_at"])
+
+    def merged_at(self):
+        return git_time(self.data["merged_at"])
+
+    def has_jira_id(self):
+        return GitPullRequest.storm_jira_number.search(self.title())
+
+    def jira_id(self):
+        return GitPullRequest.storm_jira_number.search(self.title()).group(0).upper()
+
+    def __str__(self):
+        return self.html_url()
+
+    def __repr__(self):
+        return self.html_url()
+
+
+class GitHub:
+    """Github API"""
+
+    def __init__(self, options):
+        self.headers = {}
+        if options.gituser:
+            gitpassword = getpass.getpass("github.com user " + options.gituser + ":")
+            # base64.encodestring was removed in Python 3; b64encode takes and returns bytes.
+            authstr = base64.b64encode(('%s:%s' % (options.gituser, gitpassword)).encode()).decode()
+            self.headers["Authorization"] = "Basic " + authstr
+
+    def pulls(self, user, repo, type="all"):
+        page = 1
+        ret = []
+        while True:
+            url = f"https://api.github.com/repos/{user}/{repo}/pulls?state={type}&page={page}"
+            req = urllib.request.Request(url, None, self.headers)
+            result = urllib.request.urlopen(req)
+            contents = result.read().decode()
+            if result.getcode() != 200:
+                raise Exception(str(result.getcode()) + " != 200 " + contents)
+            got = json.loads(contents)
+            for part in got:
+                ret.append(GitPullRequest(part, self))
+            if len(got) == 0:
+                return ret
+            page = page + 1
+
+    def open_pulls(self, user, repo):
+        return self.pulls(user, repo, "open")
+
+    def pull(self, user, repo, number):
+        url = f"https://api.github.com/repos/{user}/{repo}/pulls/{number}"
+        req = urllib.request.Request(url, None, self.headers)
+        result = urllib.request.urlopen(req)
+        contents = result.read().decode()
+        if result.getcode() != 200:
+            raise Exception(str(result.getcode()) + " != 200 " + contents)
+        got = json.loads(contents)
+        return GitPullRequest(got, self)
diff --git a/dev-tools/rc/download-rc-directory.sh b/dev-tools/rc/download-rc-directory.sh
new file mode 100755
index 00000000000..c5d674a7fb4
--- /dev/null
+++ b/dev-tools/rc/download-rc-directory.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+TARGET_URL=$1
+
+if [ "${TARGET_URL}" == "" ];
+then
+  echo "USAGE: $0 [target url]"
+  exit 1
+fi
+
+echo "> downloading all files in RC directory..."
+
+wget -r -nH -nd -np -R "index.html*" "${TARGET_URL}"
+
+echo "Done..."
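+
+# Example invocation (hypothetical RC URL):
+#   ./download-rc-directory.sh https://dist.apache.org/repos/dist/dev/storm/apache-storm-2.8.2-rc1/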
diff --git a/dev-tools/rc/verify-release-file.sh b/dev-tools/rc/verify-release-file.sh
new file mode 100755
index 00000000000..3e6a8690b18
--- /dev/null
+++ b/dev-tools/rc/verify-release-file.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+TARGET_FILE=$1
+
+if [ "${TARGET_FILE}" == "" ];
+then
+  echo "USAGE: $0 [target file path]"
+  exit 1
+fi
+
+echo "> checking file ${TARGET_FILE}"
+
+# verifying
+ASC_TARGET_FILE="${TARGET_FILE}.asc"
+
+echo ">> verifying signature... (${ASC_TARGET_FILE})"
+gpg --verify "${ASC_TARGET_FILE}" "${TARGET_FILE}"
+
+if [ $? -eq 0 ];
+then
+  echo 'Signature seems correct'
+else
+  echo 'Signature does not seem correct'
+fi
+
+# checking SHA
+GPG_SHA_FILE="/tmp/${TARGET_FILE}_GPG.sha512"
+gpg --print-md SHA512 "${TARGET_FILE}" > "${GPG_SHA_FILE}"
+SHA_TARGET_FILE="${TARGET_FILE}.sha512"
+
+echo ">> checking SHA file... (${SHA_TARGET_FILE})"
+diff "${GPG_SHA_FILE}" "${SHA_TARGET_FILE}"
+
+if [ $? -eq 0 ];
+then
+  echo 'SHA file is correct'
+else
+  echo 'SHA file is not correct'
+fi
diff --git a/dev-tools/release_notes.py b/dev-tools/release_notes.py
new file mode 100755
index 00000000000..3862ac51cdb
--- /dev/null
+++ b/dev-tools/release_notes.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python3
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Usage: release_notes.py <milestone number> > RELEASE_NOTES.html
+
+Depends on "requests", please use pip to install this module.
+
+Generates release notes for a Storm release by generating an HTML doc containing some introductory information about the
+ release with links to the Storm docs followed by a list of issues resolved in the release. The script will fail if it finds
+ any unresolved issues still marked with the target release. You should run this script after either resolving all issues or
+ moving outstanding issues to a later release.
+ +""" + +import requests +import sys +import os + +if len(sys.argv) < 2: + print("Usage: release_notes.py ", file=sys.stderr) + sys.exit(1) + +# GitHub configuration +GITHUB_API_BASE_URL = "https://api.github.com/" +GITHUB_TOKEN = os.getenv("GITHUB_TOKEN") + +if not GITHUB_TOKEN: + print("Error: GITHUB_TOKEN environment variable not set.", file=sys.stderr) + sys.exit(1) + +# Input arguments +owner = "apache" +repo = "storm" +milestone = sys.argv[1] # Milestone ID + + +headers = { + "Authorization": f"Bearer {GITHUB_TOKEN}", + "Accept": "application/vnd.github.v3+json" +} + +def get_milestone_title(owner, repo, milestone_number): + """ + Fetch the title of a specific milestone by its number. + """ + url = f"{GITHUB_API_BASE_URL}/repos/{owner}/{repo}/milestones/{milestone_number}" + response = requests.get(url, headers=headers) + + if response.status_code != 200: + print(f"Failed to fetch milestone: {response.status_code} {response.reason}", file=sys.stderr) + sys.exit(1) + + milestone = response.json() + return milestone["title"] + +def get_issues(owner, repo, milestone): + """ + Fetch all issues for a given milestone from a GitHub repository. + """ + issues_url = f"{GITHUB_API_BASE_URL}/repos/{owner}/{repo}/issues" + params = { + "milestone": milestone, + "state": "all", # Include both open and closed issues + "per_page": 100 + } + + issues = [] + while issues_url: + response = requests.get(issues_url, headers=headers, params=params) + if response.status_code != 200: + print(f"Failed to fetch issues: {response.status_code} {response.reason}", file=sys.stderr) + sys.exit(1) + + data = response.json() + issues.extend(data) + # Get next page URL from 'Link' header if available + issues_url = response.links.get("next", {}).get("url") + + return issues + +def issue_link(issue): + return issue["html_url"] + +if __name__ == "__main__": + issues = get_issues(owner, repo, milestone) + + if not issues: + print("No issues found for the specified milestone.", file=sys.stderr) + sys.exit(1) + + unresolved_issues = [issue for issue in issues if issue["state"] != "closed"] + if unresolved_issues: + print("The release is not completed since unresolved issues were found:", file=sys.stderr) + for issue in unresolved_issues: + print(f"Unresolved issue: {issue['number']:5d} {issue['state']:10s} {issue_link(issue)}", file=sys.stderr) + sys.exit(1) + + # Group issues by labels + issues_by_label = {} + unlabeled_issues = [] + for issue in issues: + if issue["labels"]: # If the issue has labels + for label in issue["labels"]: + label_name = label["name"] + issues_by_label.setdefault(label_name, []).append(issue) + else: + unlabeled_issues.append(issue) # Add to the unlabeled list if no labels exist + + # Add unlabeled issues under a special "No Label" category + if unlabeled_issues: + issues_by_label["Uncategorized"] = unlabeled_issues + + issues_str = "\n".join([ + f"\n\t

{label}

" + + f"\n\t
    " + + "\n\t\t".join([ + f'
  • [#{issue["number"]}] - {issue["title"]}
  • ' + for issue in issues + ]) + + "\n\t
" + for label, issues in issues_by_label.items() + ]) + + version = get_milestone_title(owner, repo, milestone) + + print(f""" + + + +Release Notes for Apache Storm {version} + + +

Release Notes for Apache Storm {version}

+

Issues addressed in {version}.

+{issues_str} + +""") diff --git a/dev-tools/report/__init__.py b/dev-tools/report/__init__.py new file mode 100644 index 00000000000..81a0e86e291 --- /dev/null +++ b/dev-tools/report/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/dev-tools/report/formatter.py b/dev-tools/report/formatter.py new file mode 100644 index 00000000000..1bdd558928c --- /dev/null +++ b/dev-tools/report/formatter.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +class Formatter: + def __init__(self, fields_tuple=(), row_tuple=(), min_width_tuple=None): + # Format to pass as first argument to the print function, e.g. '%s%s%s' + self.format = "" + # data_format will be of the form ['{!s:43}'],'{!s:39}','{!s:11}','{!s:25}'] + # the widths are determined from the data in order to print output with nice format + # Each entry of the data_format list will be used by the advanced string formatter: + # "{!s:43}".format("Text") + # Advanced string formatter as detailed in here: https://www.python.org/dev/peps/pep-3101/ + self.data_format = [] + Formatter._assert(fields_tuple, row_tuple, min_width_tuple) + self._build_format_tuples(fields_tuple, row_tuple, min_width_tuple) + + @staticmethod + def _assert(o1, o2, o3): + if len(o1) != len(o2) and (o3 is not None and len(o2) != len(o3)): + raise RuntimeError("Object collections must have the same length. 
" + "len(o1)={0}, len(o2)={1}, len(o3)={2}" + .format(len(o1), len(o2), -1 if o3 is None else len(o3))) + + # determines the widths from the data in order to print output with nice format + @staticmethod + def _find_sizes(fields_tuple, row_tuple, min_width_tuple): + sizes = [] + padding = 3 + for i in range(0, len(row_tuple)): + max_len = max(len(fields_tuple[i]), len(str(row_tuple[i]))) + if min_width_tuple is not None: + max_len = max(max_len, min_width_tuple[i]) + sizes += [max_len + padding] + return sizes + + def _build_format_tuples(self, fields_tuple, row_tuple, min_width_tuple): + sizes = Formatter._find_sizes(fields_tuple, row_tuple, min_width_tuple) + + for i in range(0, len(row_tuple)): + self.format += "%s" + self.data_format += ["{!s:" + str(sizes[i]) + "}"] + + # Returns a tuple where each entry has a string that is the result of + # statements with the pattern "{!s:43}".format("Text") + def row_str_format(self, row_tuple): + format_with_values = [self.data_format[0].format(row_tuple[0])] + for i in range(1, len(row_tuple)): + format_with_values += [self.data_format[i].format(row_tuple[i])] + return tuple(format_with_values) diff --git a/dev-tools/report/report.py b/dev-tools/report/report.py new file mode 100755 index 00000000000..bba78bc6eae --- /dev/null +++ b/dev-tools/report/report.py @@ -0,0 +1,254 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from datetime import datetime +from jira_github import Jira +from .formatter import Formatter + + +def daydiff(a, b): + return (a - b).days + + +class Report: + now = datetime.utcnow() + + def __init__(self, header=''): + self.header = header + + # if padding starts with - it puts padding before contents, otherwise after + @staticmethod + def _build_tuple(contents, padding=''): + if padding: + out = [] + for i in range(len(contents)): + out += [padding[1:] + str(contents[i])] if padding[0] == '-' else [str(contents[i]) + padding] + return tuple(out) + return contents + + # calls the native print function with the following format. Text1,Text2,... 
+    # has the correct spacing, e.g. print("%s%s%s" % ("Text1", "Text2", "Text3"))
+    def print_(self, formatter, row_tuple):
+        print(formatter.format % formatter.row_str_format(row_tuple))
+
+
+class JiraReport(Report):
+    def __init__(self, issues, header=''):
+        Report.__init__(self, header)
+        self.issues = issues
+
+    def view(self, excluded):
+        issues_view = dict(self.issues)
+        for key in excluded:
+            issues_view.pop(key, None)
+        return issues_view
+
+    def keys_view(self, excluded):
+        return sorted(self.view(excluded).keys(), key=cmp_to_key(Jira.storm_jira_cmp), reverse=True)
+
+    def values_view(self, excluded=None):
+        temp_dic = dict(self.issues) if excluded is None else self.view(excluded)
+        return sorted(temp_dic.values(), key=lambda jira: jira.get_id_num(), reverse=True)
+
+    @staticmethod
+    def _row_tuple(jira):
+        return (jira.get_id(), jira.get_trimmed_summary(), daydiff(Report.now, jira.get_created()),
+                daydiff(Report.now, jira.get_updated()))
+
+    def _min_width_tuple(self):
+        return -1, 43, -1, -1
+
+    def print_report(self):
+        print(f"{self.header} (Count = {len(self.issues)}) ")
+        jiras = self.values_view()
+        if not jiras:
+            return
+        fields_tuple = ('Jira Id', 'Summary', 'Created', 'Last Updated (Days)')
+        row_tuple = self._row_tuple(jiras[0])
+
+        formatter = Formatter(fields_tuple, row_tuple, self._min_width_tuple())
+
+        self.print_(formatter, fields_tuple)
+
+        for jira in jiras:
+            row_tuple = self._row_tuple(jira)
+            self.print_(formatter, row_tuple)
+
+    @staticmethod
+    def build_jira_url(http://github.com/jira_id):
+        BASE_URL = "https://issues.apache.org/jira/browse/"
+        return BASE_URL + jira_id
+
+
+class GitHubReport(Report):
+    def __init__(self, pull_requests=None, header=''):
+        Report.__init__(self, header)
+        self.pull_requests = [] if pull_requests is None else pull_requests
+        self.type = ''
+
+    def _row_tuple(self, pull):
+        return self._build_tuple(
+            (pull.html_url(), pull.trimmed_title(), daydiff(Report.now, pull.created_at()),
+             daydiff(Report.now, pull.updated_at()), pull.user()), '')
+
+    def _min_width_tuple(self):
+        return -1, 43, -1, -1, -1
+
+    def print_report(self):
+        print("%s (Count = %s) " % (self.header, len(self.pull_requests)))
+
+        fields_tuple = self._build_tuple(('URL', 'Title', 'Created', 'Last Updated (Days)', 'User'), '')
+        if len(self.pull_requests) > 0:
+            row_tuple = self._row_tuple(self.pull_requests[0])
+
+            formatter = Formatter(fields_tuple, row_tuple, self._min_width_tuple())
+
+            self.print_(formatter, fields_tuple)
+            for pull in self.pull_requests:
+                row_tuple = self._row_tuple(pull)
+                self.print_(formatter, row_tuple)
+
+    def jira_ids(self):
+        """
+        :return: sorted list of JIRA ids present in Git pull requests
+        """
+        jira_ids = list()
+        for pull in self.pull_requests:
+            jira_ids.append(pull.jira_id())
+        return sorted(jira_ids)
+
+
+class JiraGitHubCombinedReport(Report):
+    def __init__(self, jira_report, github_report, header='', print_comments=False):
+        Report.__init__(self, header)
+        self.jira_report = jira_report
+        self.github_report = github_report
+        self.print_comments = print_comments
+
+    def _jira_comments(self, jira_id):
+        return None if jira_id is None else self.jira_report.issues[jira_id].get_comments()
+
+    def _idx_1st_comment_with_vote(self):
+        # Returns (pull index, comment index) of the first comment carrying a
+        # vote, or None when no pull request has a voted comment.
+        g = 0
+        for pull in self.github_report.pull_requests:
+            c = 0
+            for comment in self._jira_comments(pull.jira_id()):
+                if comment.has_vote():
+                    return (g, c)
+                c += 1
+            g += 1
+
+    def _pull_request(self, pull_idx):
+        pull = self.github_report.pull_requests[pull_idx]
+        return pull
+
+    def _jira_id(self, pull_idx):
+        pull = self._pull_request(pull_idx)
+        return str(pull.jira_id())
+
+    def _jira_issue(self, jira_id):
+        return self.jira_report.issues[jira_id]
+
+    def _row_tuple(self, pull_idx):
+        pull = self._pull_request(pull_idx)
+        jira_id = self._jira_id(pull_idx)
+        jira_issue = self._jira_issue(jira_id)
+
+        return (jira_id, str(pull) if pull else "No PR", jira_issue.get_trimmed_summary(),
+                daydiff(Report.now, jira_issue.get_created()),
+                daydiff(Report.now, pull.created_at() if pull else "No PR"),
+                daydiff(Report.now, jira_issue.get_updated()),
+                daydiff(Report.now, pull.updated_at() if pull else "No PR"),
+                jira_issue.get_status(), pull.user())
+
+    def _row_tuple_1(self, pull_idx, comment_idx):
+        row_tuple_1 = None
+        jira_id = self._jira_id(pull_idx)
+        jira_comments = self._jira_comments(jira_id)
+        comment = jira_comments[comment_idx]
+        if comment.has_vote():
+            row_tuple_1 = (comment.get_vote(), comment.get_author(), comment.get_pull(),
+                           daydiff(Report.now, comment.get_created()))
+
+        return row_tuple_1
+
+    # variables and method names ending with _1 correspond to the comments part
+    def print_report(self, print_comments=False):
+        pull_request_cnt = len(self.github_report.pull_requests)
+        print("%s (Count = %s) " % (self.header, pull_request_cnt))
+        if not pull_request_cnt:
+            return
+
+        fields_tuple = ('JIRA ID', 'Pull Request', 'Jira Summary', 'JIRA Age',
+                        'Pull Age', 'JIRA Update Age', 'Pull Update Age (Days)',
+                        'JIRA Status', 'GitHub user')
+        row_tuple = self._row_tuple(0)
+        formatter = Formatter(fields_tuple, row_tuple)
+        self.print_(formatter, fields_tuple)
+
+        if print_comments or self.print_comments:
+            fields_tuple_1 = self._build_tuple(('Comment Vote', 'Comment Author', 'Pull URL', 'Comment Age'), '-\t\t')
+            row_tuple_1 = self._build_tuple(self._row_tuple_1(*self._idx_1st_comment_with_vote()), '-\t\t')
+            formatter_1 = Formatter(fields_tuple_1, row_tuple_1)
+            self.print_(formatter_1, fields_tuple_1)
+            print('')
+
+        for p in range(0, len(self.github_report.pull_requests)):
+            row_tuple = self._row_tuple(p)
+            self.print_(formatter, row_tuple)
+
+            if print_comments or self.print_comments:
+                has_vote = False
+                comments = self._jira_comments(self._jira_id(p))
+                for c in range(len(comments)):  # Check cleaner way
+                    comment = comments[c]
+                    if comment.has_vote():
+                        row_tuple_1 = self._build_tuple(self._row_tuple_1(p, c), '-\t\t')
+                        if row_tuple_1 is not None:
+                            # Reuse formatter_1 built above; an empty Formatter has no column formats.
+                            self.print_(formatter_1, row_tuple_1)
+                            has_vote = True
+                if has_vote:
+                    print('')
+
+
+class CompleteReport(Report):
+    def __init__(self, header=''):
+        Report.__init__(self, header)
+        self.jira_reports = []
+        self.github_reports = []
+        self.jira_github_combined_reports = []
+
+    def print_all(self):
+        if self.header:
+            print(self.header)
+
+        self._print_github_reports()
+        self._print_jira_github_combined_reports()
+        self._print_jira_reports()
+
+    def _print_jira_reports(self):
+        for jira in self.jira_reports:
+            jira.print_report()
+
+    def _print_github_reports(self):
+        for github in self.github_reports:
+            github.print_report()
+
+    def _print_jira_github_combined_reports(self):
+        for jira_github_combined in self.jira_github_combined_reports:
+            jira_github_combined.print_report()
diff --git a/dev-tools/report/report_builder.py b/dev-tools/report/report_builder.py
new file mode 100644
index 00000000000..6f5c842bd6d
--- /dev/null
+++ b/dev-tools/report/report_builder.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version
2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from .report import CompleteReport, GitHubReport, JiraReport, JiraGitHubCombinedReport + + +class ReportBuilder: + def __init__(self, jira_repo=None, github_repo=None): + self.jira_repo = jira_repo + self.github_repo = github_repo + + def build(self): + pass + + +class CompleteReportBuilder(ReportBuilder): + def __init__(self, jira_repo=None, github_repo=None): + ReportBuilder.__init__(self, jira_repo, github_repo) + self.report = CompleteReport() + self.build() + + def build(self): + # all open github pull requests + github_open = GitHubReport(self.github_repo.open_pulls("apache", "storm")) + github_bad_jira = GitHubReport(None, "\nGITHUB PULL REQUESTS WITH BAD OR CLOSED JIRA ID") + github_without_jira = GitHubReport(None, "\nGITHUB PULL REQUESTS WITHOUT A JIRA ID") + github_unresolved_jira = GitHubReport(None, "\nGITHUB PULL REQUESTS WITH UNRESOLVED JIRA ID") + github_unresolved_jira_voted = GitHubReport(None, "\nGITHUB PULL REQUESTS WITH VOTES FOR UNRESOLVED JIRAS") + github_open_jira = GitHubReport(None, "\nGITHUB PULL REQUESTS WITH OPEN JIRA ID") + github_unresolved_not_open_jira = GitHubReport(None, "\nGITHUB PULL REQUESTS WITH UNRESOLVED BUT NOT OPEN JIRA ID") + + # all unresolved JIRA issues + jira_unresolved = JiraReport(self.jira_repo.unresolved_jiras("STORM")) + jira_open = JiraReport(dict((x, y) for x, y in self.jira_repo.unresolved_jiras("STORM").items() + if y.get_status().lower() == 'open')) + jira_in_progress = JiraReport(dict((x, y) for x, y in self.jira_repo.in_progress_jiras("STORM").items() + if y.get_status() == 'In Progress'), + "\nIN PROGRESS JIRA ISSUES") + + for pull in github_open.pull_requests: + if pull.has_jira_id(): + pull_jira_id = pull.jira_id() + if pull_jira_id not in jira_unresolved.issues: + github_bad_jira.pull_requests.append(pull) + else: + github_unresolved_jira.pull_requests.append(pull) + if jira_unresolved.issues[pull_jira_id].has_voted_comment(): + github_unresolved_jira_voted.pull_requests.append(pull) + if pull_jira_id in jira_open.issues: + github_open_jira.pull_requests.append(pull) + else: + github_unresolved_not_open_jira.pull_requests.append(pull) + else: + github_without_jira.pull_requests.append(pull) + + jira_github_open = JiraGitHubCombinedReport(jira_open, github_open_jira, + "\nOPEN JIRA ISSUES THAT HAVE GITHUB PULL REQUESTS") + jira_github_unresolved_not_open = JiraGitHubCombinedReport(jira_unresolved, github_unresolved_not_open_jira, + "\nIN PROGRESS OR REOPENED JIRA ISSUES THAT HAVE GITHUB PULL REQUESTS") + jira_github_unresolved_voted = JiraGitHubCombinedReport(jira_unresolved, github_unresolved_jira_voted, + "\nGITHUB PULL REQUESTS WITH VOTES FOR UNRESOLVED JIRAS", True) + # jira_github_unresolved = JiraGitHubCombinedReport(jira_unresolved, github_unresolved_jira, + # "\nUnresolved JIRA issues with GitHub pull requests") + + jira_open_no_pull = JiraReport(jira_open.view(github_open_jira.jira_ids()), + "\nOPEN JIRA ISSUES THAT DON'T HAVE GITHUB PULL REQUESTS") + + # build complete report + 
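+        # Note: CompleteReport.print_all prints in a fixed order (GitHub
+        # reports first, then the combined JIRA/GitHub reports, then the
+        # plain JIRA reports) regardless of the append order below.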
self.report.jira_reports.append(jira_in_progress) + self.report.jira_reports.append(jira_open_no_pull) + + self.report.github_reports.append(github_bad_jira) + self.report.github_reports.append(github_without_jira) + + self.report.jira_github_combined_reports.append(jira_github_open) + self.report.jira_github_combined_reports.append(jira_github_unresolved_voted) + self.report.jira_github_combined_reports.append(jira_github_unresolved_not_open) + # self.report.jira_github_combined_reports.append(jira_github_unresolved) diff --git a/dev-tools/storm-merge.py b/dev-tools/storm-merge.py new file mode 100755 index 00000000000..c96a71b3558 --- /dev/null +++ b/dev-tools/storm-merge.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from github import GitHub +from optparse import OptionParser + + +def main(): + parser = OptionParser(usage="usage: %prog [options] [pull number]") + parser.add_option("-g", "--github-user", dest="gituser", + type="string", help="github user, if not supplied no auth is used", metavar="USER") + + (options, args) = parser.parse_args() + github = GitHub(options) + + for pullNumber in args: + pull = github.pull("apache", "storm", pullNumber) + print("git pull --no-ff " + pull.from_repo() + " " + pull.from_branch()) + + +if __name__ == "__main__": + main() + diff --git a/dev-tools/validate-license-files.py b/dev-tools/validate-license-files.py new file mode 100755 index 00000000000..d10bda620b5 --- /dev/null +++ b/dev-tools/validate-license-files.py @@ -0,0 +1,215 @@ +#!/usr/bin/env python3 + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from contextlib import contextmanager +from pathlib import Path +import os +import subprocess +import shlex +import filecmp +import re +import itertools +import argparse + +project_root = Path(__file__).resolve().parent.parent +update_dependency_licenses_cmd = ('mvn license:aggregate-add-third-party@generate-and-check-licenses -Dlicense.skipAggregateAddThirdParty=false -B') + + +@contextmanager +def cd(newdir): + prevdir = Path.cwd() + os.chdir(newdir.expanduser()) + try: + yield + finally: + os.chdir(prevdir) + + +def generate_dependency_licenses(): + """Generates DEPENDENCY-LICENSES in target. 
The committed DEPENDENCY-LICENSES is not modified.""" + print('Generating DEPENDENCY-LICENSES') + update_dependency_licenses_output_to_target_cmd = (update_dependency_licenses_cmd + + ' -Dlicense.thirdPartyFilename=DEPENDENCY-LICENSES' + + ' -Dlicense.outputDirectory=target') + subprocess.check_call(shlex.split( + update_dependency_licenses_output_to_target_cmd)) + print('Done generating DEPENDENCY-LICENSES') + + +def print_file_contents(msg, file1, file2, show_summary_diff=True, show_file_contents=True): + """ + Print contents of the files. Used for dumping the actual and expected DEPENDENCY-LICENSES files. + :param msg: message to print about the files + :param file1: original file + :param file2: new file + :param show_summary_diff: (optional, default True): if true, then print the summary of differences in the files + :param show_file_contents: (optional, default True): if true, then print the contents of the files + :return: + """ + f_names = [file1, file2] + print('*' * 80) + print('*' * 30 + msg + ' ' + file1 + ',' + file2 + '*' * 30) + + if show_summary_diff: + with open(file1, 'r') as f1: + with open(file2, 'r') as f2: + diff = set(f1).difference(f2) + diff.discard('\n') + print(f'***** Difference between file {file1} and {file2} *******') + for line in diff: + print(line) + print('*' * 80) + if show_file_contents: + print('*' * 80) + for i, f_name in enumerate(f_names): + print('*' * 30 + ' Start of file ' + f_name + ' ' + '*' * 30) + print('(' + str(i) + ') File ' + f_name + ' content is:') + print('\t' + '\t'.join(open(f_name).readlines())) + print('*' * 30 + ' End of file ' + f_name + ' ' + '*' * 30) + print('*' * 80) + + +def check_dependency_licenses(): + """Compares the regenerated DEPENDENCY-LICENSES in target with the DEPENDENCY-LICENSES in the root, and verifies + that they are identical""" + print('Checking DEPENDENCY-LICENSES') + if not filecmp.cmp(Path('DEPENDENCY-LICENSES'), Path('target') / 'DEPENDENCY-LICENSES', shallow=False): + print( + f"DEPENDENCY-LICENSES and target/DEPENDENCY-LICENSES are different. 
" + f"Please update DEPENDENCY-LICENSES by running '{update_dependency_licenses_cmd}' in the project root") + print_file_contents('Actual is different from expected', 'DEPENDENCY-LICENSES', 'target/DEPENDENCY-LICENSES') + return False + return True + + +def build_storm(): + print("Building Storm") + subprocess.check_call(shlex.split( + 'mvn clean install -B -DskipTests -Dcheckstyle.skip -Dpmd.skip' + )) + print("Done building Storm") + + +def extract_license_report_maven_coordinates(lines): + # Lines like " * Checker Qual (org.checkerframework:checker-qual:2.5.2 - https://checkerframework.org)" + matches = map(lambda line: re.match( + r'\s+\*.*\((?P.*) \- .*\).*', line), lines) + return set(map(lambda match: match.group('gav'), filter(lambda match: match != None, matches))) + + +def parse_license_binary_dependencies_coordinate_set(): + """Gets the dependencies listed in LICENSE-binary""" + license_binary_begin_binary_section = '----------------------------END OF SOURCE NOTICES -------------------------------------------' + license_binary_lines = read_lines(project_root / 'LICENSE-binary') + return extract_license_report_maven_coordinates( + itertools.dropwhile(lambda line: license_binary_begin_binary_section not in line, license_binary_lines)) + + +def extract_dependency_list_maven_coordinates(lines): + # Lines like " com.google.code.findbugs:jsr305:jar:3.0.2 -- module jsr305 (auto)" + matches = map(lambda line: re.match( + r'\s+(?P\S*)\:(?P\S*)\:(?P\S*)\:(?P\S*)', line), lines) + return set(map(lambda match: match.group('group') + ':' + match.group('artifact') + ':' + match.group('version'), filter(lambda match: match != None, matches))) + + +def read_lines(path): + with open(path) as f: + return f.readlines() + + +def generate_storm_dist_dependencies_coordinate_set(): + """Gets the dependencies for storm-dist/binary, plus the dependencies of storm-shaded-deps""" + generated_coordinate_set = extract_license_report_maven_coordinates(read_lines( + project_root / 'storm-dist' / 'binary' / 'target' / 'generated-sources' / 'license' / 'THIRD-PARTY.txt')) + + # Add dependencies from storm-shaded-deps + with cd(project_root / 'storm-shaded-deps'): + print("Generating dependency list for storm-shaded-deps") + subprocess.check_call(shlex.split( + 'mvn dependency:list -DoutputFile=target/deps-list -Dmdep.outputScope=false -DincludeScope=compile -B')) + print("Done generating dependency list for storm-shaded-deps") + shaded_dep_coordinates = extract_dependency_list_maven_coordinates( + read_lines(project_root / 'storm-shaded-deps' / 'target' / 'deps-list')) + shaded_dep_coordinates = set(filter(lambda coordinate: 'org.apache.storm:' not in coordinate, shaded_dep_coordinates)) + print('The storm-shaded-deps dependencies that are included when distributing storm-dist/binary are ' + str(shaded_dep_coordinates)) + print('') + generated_coordinate_set.update(shaded_dep_coordinates) + + return generated_coordinate_set + + +def generate_storm_dist_license_report(): + with cd(project_root / 'storm-dist' / 'binary'): + print('') + print('Generating storm-dist license report') + subprocess.check_call(shlex.split(update_dependency_licenses_cmd)) + print('Done generating storm-dist license report') + + +def make_license_binary_checker(): + """ + Checks that the dependencies in the storm-dist/binary license report are mentioned in LICENSE-binary, + and vice versa. 
+ """ + print('Checking LICENSE-binary') + + license_binary_coordinate_set = parse_license_binary_dependencies_coordinate_set() + generated_coordinate_set = generate_storm_dist_dependencies_coordinate_set() + superfluous_coordinates_in_license = license_binary_coordinate_set.difference( + generated_coordinate_set) + coordinates_missing_in_license = generated_coordinate_set.difference( + license_binary_coordinate_set) + print('Done checking LICENSE-binary') + + def check_for_errors(): + if superfluous_coordinates_in_license: + print('Dependencies in LICENSE-binary that appear unused: ') + for coord in sorted(superfluous_coordinates_in_license): + print(coord) + print('') + if coordinates_missing_in_license: + print('Dependencies missing from LICENSE-binary: ') + for coord in sorted(coordinates_missing_in_license): + print(coord) + any_wrong_coordinates = coordinates_missing_in_license or superfluous_coordinates_in_license + if any_wrong_coordinates: + print('LICENSE-binary needs to be updated. Please remove any unnecessary dependencies from LICENSE-binary, ' + 'and add any that are missing. You can copy any missing dependencies from DEPENDENCY-LICENSES') + return not any_wrong_coordinates + return check_for_errors + + +with cd(project_root): + parser = argparse.ArgumentParser(description='Validate that the Storm license files are up to date (excluding NOTICE-binary and the licenses/ directory)') + parser.add_argument('--skip-build-storm', action='http://github.com/store_true', help='set to skip building Storm') + args = parser.parse_args() + success = True + + if not args.skip_build_storm: + build_storm() + generate_dependency_licenses() + generate_storm_dist_license_report() + license_binary_checker = make_license_binary_checker() + success = check_dependency_licenses() and success + success = license_binary_checker() and success + if not success: + print('Some license files are not up to date, see above for the relevant error message') + exit(1) + print('License files are up to date') + exit(0) diff --git a/doap_Storm.rdf b/doap_Storm.rdf new file mode 100644 index 00000000000..0afb138518d --- /dev/null +++ b/doap_Storm.rdf @@ -0,0 +1,57 @@ + + + + + + 2014-04-12 + + Apache Storm + + + Apache Storm is a distributed real-time computation system. + Apache Storm is a distributed real-time computation system. Similar to how Hadoop provides a set of general primitives for doing batch processing, Storm provides a set of general primitives for doing real-time computation. + + + + Java + + + + Apache Storm 2.8.2 + 2025-08-03 + 2.8.2 + + + + + + + + + + + P. Taylor Goetz + + + + + diff --git a/docs/Acking-framework-implementation.md b/docs/Acking-framework-implementation.md new file mode 100644 index 00000000000..f181e9823fb --- /dev/null +++ b/docs/Acking-framework-implementation.md @@ -0,0 +1,39 @@ +--- +title: Acking framework implementation +layout: documentation +documentation: true +--- + +[Storm's acker]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/daemon/Acker.java) tracks completion of each tupletree with a checksum hash: each time a tuple is sent, its value is XORed into the checksum, and each time a tuple is acked its value is XORed in again. If all tuples have been successfully acked, the checksum will be zero (the odds that the checksum will be zero otherwise are vanishingly small). 
+
+You can read a bit more about the [reliability mechanism](Guaranteeing-message-processing.html#what-is-storms-reliability-api) elsewhere on the wiki -- this explains the internal details.
+
+### acker `execute()`
+
+The acker is actually a regular bolt. When a new tupletree is born, the spout sends the XORed edge-ids of each tuple recipient, which the acker records in its `pending` ledger. Every time an executor acks a tuple, the acker receives a partial checksum that is the XOR of the tuple's own edge-id (clearing it from the ledger) and the edge-id of each downstream tuple the executor emitted (thus entering them into the ledger).
+
+This is accomplished as follows.
+
+On a tick tuple, just advance pending tupletree checksums towards death and return. Otherwise, update or create the record for this tupletree:
+
+* on init: initialize with the given checksum value, and record the spout's id for later.
+* on ack: xor the partial checksum into the existing checksum value
+* on fail: just mark it as failed
+
+Next, put the record into the RotatingMap (thus resetting its countdown to expiry) and take action:
+
+* if the total checksum is zero, the tupletree is complete: remove it from the pending collection and notify the spout of success
+* if the tupletree has failed, it is also complete: remove it from the pending collection and notify the spout of failure
+
+Finally, pass on an ack of our own.
+
+### Pending tuples and the `RotatingMap`
+
+The acker stores pending tuples in a [`RotatingMap`]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/utils/RotatingMap.java), a simple device used in several places within Storm to efficiently time-expire a process.
+
+The RotatingMap behaves as a HashMap, and offers the same O(1) access guarantees.
+
+Internally, it holds several HashMaps ('buckets') of its own, each holding a cohort of records that will expire at the same time. Let's call the longest-lived bucket death row, and the most recent the nursery. Whenever a value is `.put()` to the RotatingMap, it is relocated to the nursery -- and removed from any other bucket it might have been in (effectively resetting its death clock).
+
+Whenever its owner calls `.rotate()`, the RotatingMap advances each cohort one step further towards expiration. (Typically, Storm objects call rotate on every receipt of a system tick stream tuple.) If there are any key-value pairs in the former death row bucket, the RotatingMap invokes a callback (given in the constructor) for each key-value pair, letting its owner take appropriate action (e.g., failing a tuple).
+
diff --git a/docs/Classpath-handling.md b/docs/Classpath-handling.md
new file mode 100644
index 00000000000..c9e319b9667
--- /dev/null
+++ b/docs/Classpath-handling.md
@@ -0,0 +1,29 @@
+---
+title: Classpath Handling
+layout: documentation
+documentation: true
+---
+### Storm is an Application Container
+
+Storm provides an application container environment, a la Apache Tomcat, which creates a potential for classpath conflicts between Storm and your application. The most common way of using Storm involves submitting an "uber JAR" containing your application code with all of its dependencies bundled in, and then Storm distributes this JAR to Worker nodes. Storm then runs your application within a Storm process called a `Worker` -- thus the JVM's classpath contains the dependencies of your JAR as well as whatever dependencies the Worker itself has. So careful handling of classpaths and dependencies is critical for the correct functioning of Storm.
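+
+To make the risk concrete, the sketch below (a hypothetical helper in Python, not something shipped with Storm) lists the `.class` entries that appear both in a topology uber JAR and in the JARs of a Storm `lib/` directory; overlapping classes like these are the usual source of classpath conflicts.
+
+```python
+import sys
+import zipfile
+from pathlib import Path
+
+def classes_in(jar_path):
+    # JAR files are zip archives, so zipfile can list their entries.
+    with zipfile.ZipFile(jar_path) as jar:
+        return {name for name in jar.namelist() if name.endswith(".class")}
+
+def find_conflicts(uber_jar, storm_lib):
+    # Report every lib JAR that shares class files with the topology JAR.
+    topology_classes = classes_in(uber_jar)
+    for lib_jar in sorted(Path(storm_lib).glob("*.jar")):
+        overlap = topology_classes & classes_in(lib_jar)
+        if overlap:
+            print(f"{lib_jar.name}: {len(overlap)} overlapping classes")
+
+if __name__ == "__main__":
+    # e.g. python find_conflicts.py target/my-topology.jar /opt/storm/lib
+    find_conflicts(sys.argv[1], sys.argv[2])
+```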
+
+### Adding Extra Dependencies to Classpath
+
+You no longer *need* to bundle your dependencies into your topology and create an uber JAR; there are now facilities for separately handling your topology's dependencies. Furthermore, there are facilities for adding external dependencies into the Storm daemons.
+
+The `storm.py` launcher script allows you to include dependencies in the launched program's classpath via a few different mechanisms:
+
+1. The `--jar` and `--artifacts` options for the `storm jar` command allow the inclusion of non-bundled dependencies with your topology; i.e., they allow you to specify JARs that were not bundled into the topology uber-jar. This is required when using the `storm sql` command, which constructs a topology automatically without needing you to write code and build a topology JAR.
+2. The `${STORM_DIR}/extlib/` and `${STORM_DIR}/extlib-daemon/` directories can have dependencies added to them for the inclusion of plugins & 3rd-party libraries into the Storm daemons (e.g., Nimbus, UI, Supervisor, etc. -- use `extlib-daemon/`) and other commands launched via the `storm.py` script, e.g., `storm sql` and `storm jar` (use `extlib/`). Notably, this means that the Storm Worker process does not include the `extlib-daemon/` directory in its classpath.
+3. The `STORM_EXT_CLASSPATH` and `STORM_EXT_CLASSPATH_DAEMON` environment variables provide similar functionality to those directories, but allow the user to place their external dependencies in alternative locations.
+    * There is a wrinkle here: because the Supervisor daemon launches the Worker process, if you want `STORM_EXT_CLASSPATH` to impact your Workers, you will need to specify `STORM_EXT_CLASSPATH` for the Supervisor daemon. That allows the Supervisor to consult this environment variable as it constructs the classpath of the Worker processes.
+
+#### Which Facility to Choose?
+
+You might have noticed the overlap between the first mechanism and the others. If you consider the `--jar` / `--artifacts` options versus `extlib/` / `STORM_EXT_CLASSPATH`, it is not obvious which one you should choose for using dependencies with your Worker processes; i.e., both mechanisms allow including JARs to be used for running your Worker processes. Here is my understanding of the difference: `--jar` / `--artifacts` will result in the dependencies being used for running the `storm jar/sql` command, *and* the dependencies will be uploaded and available in the classpath of the topology's `Worker` processes. By contrast, the use of `extlib/` / `STORM_EXT_CLASSPATH` requires you to have distributed your JAR dependencies to all Worker nodes yourself. Another difference is that `extlib/` / `STORM_EXT_CLASSPATH` impacts all topologies, whereas `--jar` / `--artifacts` is a topology-specific option.
+
+### Abbreviation of Classpaths and Process Commands
+
+When the `storm.py` script launches a `java` command, it first constructs the classpath from the optional settings mentioned above, as well as including some default locations such as the `${STORM_DIR}/`, `${STORM_DIR}/lib/`, `${STORM_DIR}/extlib/` and `${STORM_DIR}/extlib-daemon/` directories. In past releases, Storm would enumerate all JARs in those directories and then explicitly add all of those JARs into the `-cp` / `--classpath` argument to the launched `java` commands. As such, the classpath would get so long that the `java` commands could breach the Linux kernel's process table limit of 4096 bytes for recording commands.
That led to truncated commands in `ps` output, making it hard to operate Storm clusters because you could not easily differentiate the processes nor easily see from `ps` which port a worker is listening to. + +After Storm dropped support for Java 5, this classpath expansion was no longer necessary, because Java 6 supports classpath wildcards. Classpath wildcards allow you to specify a directory ending with a `*` element, such as `foo/bar/*`, and the JVM will automatically expand the classpath to include all `.jar` files in the wildcard directory. As of [STORM-2191](https://issues.apache.org/jira/browse/STORM-2191) Storm just uses classpath wildcards instead of explicitly listing all JARs, thereby shortening all of the commands and making operating Storm clusters a bit easier. diff --git a/docs/Clojure-DSL.md b/docs/Clojure-DSL.md new file mode 100644 index 00000000000..e8485079d4f --- /dev/null +++ b/docs/Clojure-DSL.md @@ -0,0 +1,266 @@ +--- +title: Clojure DSL +layout: documentation +documentation: true +--- +Storm offers a Clojure DSL through the storm-clojure package for defining spouts, bolts, and topologies. The Clojure DSL has access to everything the Java API exposes, so if you're a Clojure user you can code Storm topologies without touching Java at all. The Clojure DSL is defined in the source in the [org.apache.storm.clojure]({{page.git-blob-base}}/storm-clojure/src/clj/org/apache/storm/clojure.clj) namespace. + +This page outlines all the pieces of the Clojure DSL, including: + +1. Defining topologies +2. `defbolt` +3. `defspout` +4. Running topologies in local mode or on a cluster +5. Testing topologies + +### Defining topologies + +To define a topology, use the `topology` function. `topology` takes in two arguments: a map of "spout specs" and a map of "bolt specs". Each spout and bolt spec wires the code for the component into the topology by specifying things like inputs and parallelism. + +Let's take a look at an example topology definition [from the storm-starter project]({{page.git-blob-base}}/examples/storm-starter/src/clj/org/apache/storm/starter/clj/word_count.clj): + +```clojure +(topology + {"1" (spout-spec sentence-spout) + "2" (spout-spec (sentence-spout-parameterized + ["the cat jumped over the door" + "greetings from a faraway land"]) + :p 2)} + {"3" (bolt-spec {"1" :shuffle "2" :shuffle} + split-sentence + :p 5) + "4" (bolt-spec {"3" ["word"]} + word-count + :p 6)}) +``` + +The maps of spout and bolt specs are maps from the component id to the corresponding spec. The component ids must be unique across the maps. Just like defining topologies in Java, component ids are used when declaring inputs for bolts in the topology. + +#### spout-spec + +`spout-spec` takes as arguments the spout implementation (an object that implements [IRichSpout](javadocs/org/apache/storm/topology/IRichSpout.html)) and optional keyword arguments. The only option that exists currently is the `:p` option, which specifies the parallelism for the spout. If you omit `:p`, the spout will execute as a single task. + +#### bolt-spec + +`bolt-spec` takes as arguments the input declaration for the bolt, the bolt implementation (an object that implements [IRichBolt](javadocs/org/apache/storm/topology/IRichBolt.html)), and optional keyword arguments. + +The input declaration is a map from stream ids to stream groupings. A stream id can have one of two forms: + +1. `[==component id== ==stream id==]`: Subscribes to a specific stream on a component +2. 
`==component id==`: Subscribes to the default stream on a component
+
+A stream grouping can be one of the following:
+
+1. `:shuffle`: subscribes with a shuffle grouping
+2. Vector of field names, like `["id" "name"]`: subscribes with a fields grouping on the specified fields
+3. `:global`: subscribes with a global grouping
+4. `:all`: subscribes with an all grouping
+5. `:direct`: subscribes with a direct grouping
+
+See [Concepts](Concepts.html) for more info on stream groupings. Here's an example input declaration showcasing the various ways to declare inputs:
+
+```clojure
+{["2" "1"] :shuffle
+ "3" ["field1" "field2"]
+ ["4" "2"] :global}
+```
+
+This input declaration subscribes to three streams total. It subscribes to stream "1" on component "2" with a shuffle grouping, subscribes to the default stream on component "3" with a fields grouping on the fields "field1" and "field2", and subscribes to stream "2" on component "4" with a global grouping.
+
+Like `spout-spec`, the only currently supported keyword argument for `bolt-spec` is `:p`, which specifies the parallelism for the bolt.
+
+#### shell-bolt-spec
+
+`shell-bolt-spec` is used for defining bolts that are implemented in a non-JVM language. It takes as arguments the input declaration, the command line program to run, the name of the file implementing the bolt, an output specification, and then the same keyword arguments that `bolt-spec` accepts.
+
+Here's an example `shell-bolt-spec`:
+
+```clojure
+(shell-bolt-spec {"1" :shuffle "2" ["id"]}
+                 "python3"
+                 "mybolt.py"
+                 ["outfield1" "outfield2"]
+                 :p 25)
+```
+
+The syntax of output declarations is described in more detail in the `defbolt` section below. See [Using non JVM languages with Storm](Using-non-JVM-languages-with-Storm.html) for more details on how multilang works within Storm.
+
+### defbolt
+
+`defbolt` is used for defining bolts in Clojure. Bolts have the constraint that they must be serializable, and this is why you can't just reify `IRichBolt` to implement a bolt (closures aren't serializable). `defbolt` works around this restriction and provides a nicer syntax for defining bolts than just implementing a Java interface.
+
+At its fullest expressiveness, `defbolt` supports parameterized bolts and maintaining state in a closure around the bolt implementation. It also provides shortcuts for defining bolts that don't need this extra functionality. The signature for `defbolt` looks like the following:
+
+(defbolt _name_ _output-declaration_ *_option-map_ & _impl_)
+
+Omitting the option map is equivalent to having an option map of `{:prepare false}`.
+
+#### Simple bolts
+
+Let's start with the simplest form of `defbolt`. Here's an example bolt that splits a tuple containing a sentence into a tuple for each word:
+
+```clojure
+(defbolt split-sentence ["word"] [tuple collector]
+  (let [words (.split (.getString tuple 0) " ")]
+    (doseq [w words]
+      (emit-bolt! collector [w] :anchor tuple))
+    (ack! collector tuple)
+    ))
+```
+
+Since the option map is omitted, this is a non-prepared bolt. The DSL simply expects an implementation for the `execute` method of `IRichBolt`. The implementation takes two parameters, the tuple and the `OutputCollector`, and is followed by the body of the `execute` function. The DSL automatically type-hints the parameters for you so you don't need to worry about reflection if you use Java interop.
+
+This implementation binds `split-sentence` to an actual `IRichBolt` object that you can use in topologies, like so:
+
+```clojure
+(bolt-spec {"1" :shuffle}
+           split-sentence
+           :p 5)
+```
+
+
+#### Parameterized bolts
+
+Many times you want to parameterize your bolts with other arguments. For example, let's say you wanted to have a bolt that appends a suffix to every input string it receives, and you want that suffix to be set at runtime. You do this with `defbolt` by including a `:params` option in the option map, like so:
+
+```clojure
+(defbolt suffix-appender ["word"] {:params [suffix]}
+  [tuple collector]
+  (emit-bolt! collector [(str (.getString tuple 0) suffix)] :anchor tuple)
+  )
+```
+
+Unlike the previous example, `suffix-appender` will be bound to a function that returns an `IRichBolt` rather than be an `IRichBolt` object directly. This is caused by specifying `:params` in its option map. So to use `suffix-appender` in a topology, you would do something like:
+
+```clojure
+(bolt-spec {"1" :shuffle}
+           (suffix-appender "-suffix")
+           :p 10)
+```
+
+#### Prepared bolts
+
+For more complex bolts, such as ones that do joins and streaming aggregations, the bolt needs to store state. You can do this by creating a prepared bolt, which is specified by including `{:prepare true}` in the option map. Consider, for example, this bolt that implements word counting:
+
+```clojure
+(defbolt word-count ["word" "count"] {:prepare true}
+  [conf context collector]
+  (let [counts (atom {})]
+    (bolt
+     (execute [tuple]
+       (let [word (.getString tuple 0)]
+         (swap! counts (partial merge-with +) {word 1})
+         (emit-bolt! collector [word (@counts word)] :anchor tuple)
+         (ack! collector tuple)
+         )))))
+```
+
+The implementation for a prepared bolt is a function that takes as input the topology config, `TopologyContext`, and `OutputCollector`, and returns an implementation of the `IBolt` interface. This design allows you to have a closure around the implementation of `execute` and `cleanup`.
+
+In this example, the word counts are stored in the closure in a map called `counts`. The `bolt` macro is used to create the `IBolt` implementation. The `bolt` macro is a more concise way to implement the interface than reifying, and it automatically type-hints all of the method parameters. This bolt implements the `execute` method, which updates the count in the map and emits the new word count.
+
+Note that the `execute` method in prepared bolts only takes as input the tuple, since the `OutputCollector` is already in the closure of the function (for simple bolts the collector is a second parameter to the `execute` function).
+
+Prepared bolts can be parameterized just like simple bolts.
+
+#### Output declarations
+
+The Clojure DSL has a concise syntax for declaring the outputs of a bolt. The most general way to declare the outputs is as a map from stream id to a stream spec. For example:
+
+```clojure
+{"1" ["field1" "field2"]
+ "2" (direct-stream ["f1" "f2" "f3"])
+ "3" ["f1"]}
+```
+
+The stream id is a string, while the stream spec is either a vector of fields or a vector of fields wrapped by `direct-stream`. `direct-stream` marks the stream as a direct stream (see [Concepts](Concepts.html) for more details on direct streams).
+
+If the bolt only has one output stream, you can define the default stream of the bolt by using a vector instead of a map for the output declaration.
For example:
+
+```clojure
+["word" "count"]
+```
+This declares the output of the bolt as the fields ["word" "count"] on the default stream id.
+
+#### Emitting, acking, and failing
+
+Rather than use the Java methods on `OutputCollector` directly, the DSL provides a nicer set of functions for using `OutputCollector`: `emit-bolt!`, `emit-direct-bolt!`, `ack!`, and `fail!`.
+
+1. `emit-bolt!`: takes as parameters the `OutputCollector`, the values to emit (a Clojure sequence), and keyword arguments for `:anchor` and `:stream`. `:anchor` can be a single tuple or a list of tuples, and `:stream` is the id of the stream to emit to. Omitting the keyword arguments emits an unanchored tuple to the default stream.
+2. `emit-direct-bolt!`: takes as parameters the `OutputCollector`, the task id to send the tuple to, the values to emit, and keyword arguments for `:anchor` and `:stream`. This function can only emit to streams declared as direct streams.
+3. `ack!`: takes as parameters the `OutputCollector` and the tuple to ack.
+4. `fail!`: takes as parameters the `OutputCollector` and the tuple to fail.
+
+See [Guaranteeing message processing](Guaranteeing-message-processing.html) for more info on acking and anchoring.
+
+### defspout
+
+`defspout` is used for defining spouts in Clojure. Like bolts, spouts must be serializable, so you can't just reify `IRichSpout` to do spout implementations in Clojure. `defspout` works around this restriction and provides a nicer syntax for defining spouts than just implementing a Java interface.
+
+The signature for `defspout` looks like the following:
+
+(defspout _name_ _output-declaration_ *_option-map_ & _impl_)
+
+If you leave out the option map, it defaults to `{:prepare true}`. The output declaration for `defspout` has the same syntax as `defbolt`.
+
+Here's an example `defspout` implementation from [storm-starter]({{page.git-blob-base}}/examples/storm-starter/src/clj/org/apache/storm/starter/clj/word_count.clj):
+
+```clojure
+(defspout sentence-spout ["sentence"]
+  [conf context collector]
+  (let [sentences ["a little brown dog"
+                   "the man petted the dog"
+                   "four score and seven years ago"
+                   "an apple a day keeps the doctor away"]]
+    (spout
+     (nextTuple []
+       (Thread/sleep 100)
+       (emit-spout! collector [(rand-nth sentences)])
+       )
+     (ack [id]
+        ;; You only need to define this method for reliable spouts
+        ;; (such as one that reads off of a queue like Kestrel)
+        ;; This is an unreliable spout, so it does nothing here
+        ))))
+```
+
+The implementation takes in as input the topology config, `TopologyContext`, and `SpoutOutputCollector`. The implementation returns an `ISpout` object. Here, the `nextTuple` function emits a random sentence from `sentences`.
+
+This spout isn't reliable, so the `ack` and `fail` methods will never be called. A reliable spout will add a message id when emitting tuples, and then `ack` or `fail` will be called when the tuple is completed or failed respectively. See [Guaranteeing message processing](Guaranteeing-message-processing.html) for more info on how reliability works within Storm.
+
+`emit-spout!` takes in as parameters the `SpoutOutputCollector` and the new tuple to be emitted, and accepts as keyword arguments `:stream` and `:id`. `:stream` specifies the stream to emit to, and `:id` specifies a message id for the tuple (used in the `ack` and `fail` callbacks). Omitting these arguments emits an unanchored tuple to the default output stream.
+
+There is also an `emit-direct-spout!` function that emits a tuple to a direct stream and takes the task id to send the tuple to as an additional second parameter.
+
+Spouts can be parameterized just like bolts, in which case the symbol is bound to a function returning `IRichSpout` instead of the `IRichSpout` itself. You can also declare an unprepared spout which only defines the `nextTuple` method. Here is an example of an unprepared spout that emits random sentences parameterized at runtime:
+
+```clojure
+(defspout sentence-spout-parameterized ["word"] {:params [sentences] :prepare false}
+  [collector]
+  (Thread/sleep 500)
+  (emit-spout! collector [(rand-nth sentences)]))
+```
+
+The following example illustrates how to use this spout in a `spout-spec`:
+
+```clojure
+(spout-spec (sentence-spout-parameterized
+             ["the cat jumped over the door"
+              "greetings from a faraway land"])
+            :p 2)
+```
+
+### Running topologies in local mode or on a cluster
+
+That's all there is to the Clojure DSL. To submit topologies in remote mode or local mode, use the `StormSubmitter` class just as you would from Java.
+
+To create topology configs, it's easiest to use the `org.apache.storm.config` namespace, which defines constants for all of the possible configs. The constants are the same as the static constants in the `Config` class, except with dashes instead of underscores. For example, here's a topology config that sets the number of workers to 15 and configures the topology in debug mode:
+
+```clojure
+{TOPOLOGY-DEBUG true
+ TOPOLOGY-WORKERS 15}
+```
+
+### Testing topologies
+
+[This blog post](http://www.pixelmachine.org/2011/12/17/Testing-Storm-Topologies.html) and its [follow-up](http://www.pixelmachine.org/2011/12/21/Testing-Storm-Topologies-Part-2.html) give a good overview of Storm's powerful built-in facilities for testing topologies in Clojure.
diff --git a/docs/ClusterMetrics.md b/docs/ClusterMetrics.md
new file mode 100644
index 00000000000..6b18099224c
--- /dev/null
+++ b/docs/ClusterMetrics.md
@@ -0,0 +1,278 @@
+---
+title: Cluster Metrics
+layout: documentation
+documentation: true
+---
+
+# Cluster Metrics
+
+There are lots of metrics to help you monitor a running cluster. Many of these metrics are still a work in progress, and so is the metrics system itself, so any of them may change, even between minor version releases. We will try to keep them as stable as possible, but they should all be considered somewhat unstable. Some of the metrics may also be for experimental features, or features that are not complete yet, so please read the description of the metric before using it for monitoring or alerting.
+
+Also be aware that depending on the metrics system you use, the names are likely to be translated into a different format that is compatible with the system. Typically this means that the ':' separating character will be replaced with a '.' character.
+
+Most metrics should have the units that they are reported in as a part of the description. For timers, this is often configured by the reporter that is uploading them to your system. Pay attention: even if the metric name has a time unit in it, the actual reporting unit may differ.
+
+Also, most metrics, except for gauges and counters, are a collection of numbers, and not a single value. Often these result in multiple metrics being uploaded to a reporting system, such as percentiles for a histogram, or rates for a meter.
How this happens, and how the names here correspond to the metrics in your reporting system, depends on the configured metrics reporter.
+
+## Cluster Metrics (From Nimbus)
+
+These are metrics that come from the active nimbus instance and report the state of the cluster as a whole, as seen by nimbus.
+
+| Metric Name | Type | Description |
+|-------------|------|-------------|
+| cluster:num-nimbus-leaders | gauge | Number of nimbuses marked as a leader. This should really only ever be 1 in a healthy cluster, or 0 for a short period of time while a failover happens. |
+| cluster:num-nimbuses | gauge | Number of nimbuses, leader or standby. |
+| cluster:num-supervisors | gauge | Number of supervisors. |
+| cluster:num-topologies | gauge | Number of topologies. |
+| cluster:num-total-used-workers | gauge | Number of used workers/slots. |
+| cluster:num-total-workers | gauge | Number of workers/slots. |
+| cluster:total-fragmented-cpu-non-negative | gauge | Total fragmented CPU (% of core). This is CPU that the system thinks it cannot use because other resources on the node are used up. |
+| cluster:total-fragmented-memory-non-negative | gauge | Total fragmented memory (MB). This is the memory that the system thinks it cannot use because other resources on the node are used up. |
+| topologies:assigned-cpu | histogram | CPU scheduled per topology (% of a core). |
+| topologies:assigned-mem-off-heap | histogram | Off heap memory scheduled per topology (MB). |
+| topologies:assigned-mem-on-heap | histogram | On heap memory scheduled per topology (MB). |
+| topologies:num-executors | histogram | Number of executors per topology. |
+| topologies:num-tasks | histogram | Number of tasks per topology. |
+| topologies:num-workers | histogram | Number of workers per topology. |
+| topologies:replication-count | histogram | Replication count per topology. |
+| topologies:requested-cpu | histogram | CPU requested per topology (% of a core). |
+| topologies:requested-mem-off-heap | histogram | Off heap memory requested per topology (MB). |
+| topologies:requested-mem-on-heap | histogram | On heap memory requested per topology (MB). |
+| topologies:uptime-secs | histogram | Uptime per topology (seconds). |
+| nimbus:available-cpu-non-negative | gauge | Available CPU on the cluster (% of a core). |
+| nimbus:total-cpu | gauge | Total CPU on the cluster (% of a core). |
+| nimbus:total-memory | gauge | Total memory on the cluster (MB). |
+| supervisors:fragmented-cpu | histogram | Fragmented CPU per supervisor (% of a core). |
+| supervisors:fragmented-mem | histogram | Fragmented memory per supervisor (MB). |
+| supervisors:num-used-workers | histogram | Workers used per supervisor. |
+| supervisors:num-workers | histogram | Number of workers per supervisor. |
+| supervisors:uptime-secs | histogram | Uptime of supervisors (seconds). |
+| supervisors:used-cpu | histogram | CPU used per supervisor (% of a core). |
+| supervisors:used-mem | histogram | Memory used per supervisor (MB). |
+
+## Nimbus Metrics
+
+These are metrics that are specific to a nimbus instance. In many instances, only the active nimbus will be reporting these metrics, but they could come from standby nimbus instances as well.
+
+| Metric Name | Type | Description |
+|-------------|------|-------------|
+| nimbus:files-upload-duration-ms | timer | Time it takes to upload a file from start to finish (Not Blobs, but this may change) |
+| nimbus:longest-scheduling-time-ms | gauge | Longest time ever taken so far to schedule.
This includes the current scheduling run, which is intended to detect if scheduling is stuck for some reason. | +| nimbus:mkAssignments-Errors | meter | tracks exceptions from mkAssignments | +| nimbus:num-activate-calls | meter | calls to the activate thrift method. | +| nimbus:num-added-executors-per-scheduling | histogram | number of executors added after a scheduling run. | +| nimbus:num-added-slots-per-scheduling | histogram | number of slots added after a scheduling run. | +| nimbus:num-beginFileUpload-calls | meter | calls to the beginFileUpload thrift method. | +| nimbus:num-blacklisted-supervisor | gauge | Number of supervisors currently marked as blacklisted because they appear to be somewhat unstable. | +| nimbus:num-deactivate-calls | meter | calls to deactivate thrift method. | +| nimbus:num-debug-calls | meter | calls to debug thrift method.| +| nimbus:num-downloadChunk-calls | meter | calls to downloadChunk thrift method. | +| nimbus:num-finishFileUpload-calls | meter | calls to finishFileUpload thrift method.| +| nimbus:num-gained-leadership | meter | number of times this nimbus gained leadership. | +| nimbus:num-getClusterInfo-calls | meter | calls to getClusterInfo thrift method. | +| nimbus:num-getComponentPageInfo-calls | meter | calls to getComponentPageInfo thrift method. | +| nimbus:num-getComponentPendingProfileActions-calls | meter | calls to getComponentPendingProfileActions thrift method. | +| nimbus:num-getLeader-calls | meter | calls to getLeader thrift method. | +| nimbus:num-getLogConfig-calls | meter | calls to getLogConfig thrift method. | +| nimbus:num-getNimbusConf-calls | meter | calls to getNimbusConf thrift method. | +| nimbus:num-getOwnerResourceSummaries-calls | meter | calls to getOwnerResourceSummaries thrift method. | +| nimbus:num-getSupervisorPageInfo-calls | meter | calls to getSupervisorPageInfo thrift method. | +| nimbus:num-getTopology-calls | meter | calls to getTopology thrift method. | +| nimbus:num-getTopologyConf-calls | meter | calls to getTopologyConf thrift method. | +| nimbus:num-getTopologyInfo-calls | meter | calls to getTopologyInfo thrift method. | +| nimbus:num-getTopologyInfoWithOpts-calls | meter | calls to getTopologyInfoWithOpts thrift method includes calls to getTopologyInfo. | +| nimbus:num-getTopologyPageInfo-calls | meter | calls to getTopologyPageInfo thrift method. | +| nimbus:num-getUserTopology-calls | meter | calls to getUserTopology thrift method. | +| nimbus:num-isTopologyNameAllowed-calls | meter | calls to isTopologyNameAllowed thrift method. | +| nimbus:num-killTopology-calls | meter | calls to killTopology thrift method. | +| nimbus:num-killTopologyWithOpts-calls | meter | calls to killTopologyWithOpts thrift method includes calls to killTopology. | +| nimbus:num-launched | meter | number of times a nimbus was launched | +| nimbus:num-lost-leadership | meter | number of times this nimbus lost leadership | +| nimbus:num-negative-resource-events | meter | Any time a resource goes negative (either CPU or Memory). This metric is not ideal as it is measured in a data structure that is used for internal calculations that may go negative and not actually represent over scheduling of a resource. | +| nimbus:num-net-executors-increase-per-scheduling | histogram | added executors minus removed executors after a scheduling run | +| nimbus:num-net-slots-increase-per-scheduling | histogram | added slots minus removed slots after a scheduling run | +| nimbus:num-rebalance-calls | meter | calls to rebalance thrift method. 
|
+| nimbus:num-removed-executors-per-scheduling | histogram | number of executors removed after a scheduling run |
+| nimbus:num-scheduling-timeouts | meter | number of timeouts during scheduling |
+| nimbus:num-removed-slots-per-scheduling | histogram | number of slots removed after a scheduling run |
+| nimbus:num-setLogConfig-calls | meter | calls to setLogConfig thrift method. |
+| nimbus:num-setWorkerProfiler-calls | meter | calls to setWorkerProfiler thrift method. |
+| nimbus:num-shutdown-calls | meter | times nimbus is shut down (this may not actually be reported as nimbus is in the middle of shutting down) |
+| nimbus:num-submitTopology-calls | meter | calls to submitTopology thrift method. |
+| nimbus:num-submitTopologyWithOpts-calls | meter | calls to submitTopologyWithOpts thrift method includes calls to submitTopology. |
+| nimbus:num-uploadChunk-calls | meter | calls to uploadChunk thrift method. |
+| nimbus:num-uploadNewCredentials-calls | meter | calls to uploadNewCredentials thrift method. |
+| nimbus:process-worker-metric-calls | meter | calls to processWorkerMetrics thrift method. |
+| nimbus:scheduler-internal-errors | meter | tracks internal scheduling errors |
+| nimbus:topology-scheduling-duration-ms | timer | time it takes to do a scheduling run. |
+| nimbus:total-available-memory-non-negative | gauge | available memory on the cluster (MB) |
+| nimbuses:uptime-secs | histogram | uptime of nimbuses |
+| MetricsCleaner:purgeTimestamp | gauge | last time metrics were purged (Unfinished Feature) |
+| RocksDB:metric-failures | meter | generally any failure that happens in the rocksdb metrics store. (Unfinished Feature) |
+
+
+## DRPC Metrics
+
+Metrics related to DRPC servers.
+
+| Metric Name | Type | Description |
+|-------------|------|-------------|
+| drpc:HTTP-request-response-duration | timer | how long it takes to execute an http drpc request |
+| drpc:num-execute-calls | meter | calls to execute a DRPC request |
+| drpc:num-execute-http-requests | meter | http requests to the DRPC server |
+| drpc:num-failRequest-calls | meter | calls to failRequest |
+| drpc:num-fetchRequest-calls | meter | calls to fetchRequest |
+| drpc:num-result-calls | meter | calls to returnResult |
+| drpc:num-server-timedout-requests | meter | times a DRPC request timed out without a response |
+| drpc:num-shutdown-calls | meter | number of times shutdown is called on the drpc server |
+
+## Logviewer Metrics
+
+Metrics related to the logviewer process. This process currently also handles cleaning up worker logs when they get too large or too old.
+
+| Metric Name | Type | Description |
+|-------------|------|-------------|
+| logviewer:cleanup-routine-duration-ms | timer | how long it takes to run the log cleanup routine |
+| logviewer:deep-search-request-duration-ms | timer | how long it takes for /deepSearch/{topoId} |
+| logviewer:disk-space-freed-in-bytes | histogram | number of bytes cleaned up each time through the cleanup routine.
| +| logviewer:download-file-size-rounded-MB | histogram | size in MB of files being downloaded | +| logviewer:num-daemonlog-page-http-requests | meter | calls to /daemonlog | +| logviewer:num-deep-search-no-result | meter | number of deep search requests that did not return any results | +| logviewer:num-deep-search-requests-with-archived | meter | calls to /deepSearch/{topoId} with ?search-archived=true | +| logviewer:num-deep-search-requests-without-archived | meter | calls to /deepSearch/{topoId} with ?search-archived=false | +| logviewer:num-download-daemon-log-exceptions | meter | num errors in calls to /daemondownload | +| logviewer:num-download-dump-exceptions | meter | num errors in calls to /dumps/{topo-id}/{host-port}/{filename} | +| logviewer:num-download-log-daemon-file-http-requests | meter | calls to /daemondownload | +| logviewer:num-download-log-exceptions | meter | num errors in calls to /download | +| logviewer:num-download-log-file-http-requests | meter | calls to /download | +| logviewer:num-file-download-exceptions | meter | errors while trying to download files. | +| logviewer:num-file-download-exceptions | meter | number of exceptions trying to download a log file | +| logviewer:num-file-open-exceptions | meter | errors trying to open a file (when deleting logs) | +| logviewer:num-file-open-exceptions | meter | number of exceptions trying to open a log file for serving | +| logviewer:num-file-read-exceptions | meter | number of exceptions trying to read from a log file for serving | +| logviewer:num-file-removal-exceptions | meter | number of exceptions trying to cleanup files. | +| logviewer:num-files-cleaned-up | histogram | number of files cleaned up each time through the cleanup routine. | +| logviewer:num-files-scanned-per-deep-search | histogram | number of files scanned per deep search | +| logviewer:num-list-dump-files-exceptions | meter | num errors in calls to /dumps/{topo-id}/{host-port} | +| logviewer:num-list-logs-http-request | meter | calls to /listLogs | +| logviewer:num-log-page-http-requests | meter | calls to /log | +| logviewer:num-other-cleanup-exceptions | meter | number of exception in the cleanup loop, not directly deleting files. | +| logviewer:num-page-read | meter | number of pages (parts of a log file) that are served up | +| logviewer:num-read-daemon-log-exceptions | meter | num errors in calls to /daemonlog | +| logviewer:num-read-log-exceptions | meter | num errors in calls to /log | +| logviewer:num-search-exceptions | meter | num errors in calls to /search | +| logviewer:num-search-log-exceptions | meter | num errors in calls to /listLogs | +| logviewer:num-search-logs-requests | meter | calls to /search | +| logviewer:num-search-request-no-result | meter | number of regular search results that were empty | +| logviewer:num-set-permission-exceptions | meter | num errors running set permissions to open up files for reading. | +| logviewer:num-shutdown-calls | meter | number of times shutdown was called on the logviewer | +| logviewer:search-requests-duration-ms | timer | how long it takes for /search | +| logviewer:worker-log-dir-size | gauge | size in bytes of the worker logs directory. | + +## Supervisor Metrics + +Metrics associated with the supervisor, which launches the workers for a topology. The supervisor also has a state machine for each slot. Some of the metrics are associated with that state machine and can be confusing if you do not understand the state machine. 
+ +| Metric Name | Type | Description | +|-------------|------|-------------| +| supervisor:blob-cache-update-duration | timer | how long it takes to update all of the blobs in the cache (frequently just check if they have changed, but may also include downloading them.) | +| supervisor:blob-fetching-rate-MB/s | histogram | Download rate of a blob in MB/sec. Blobs are downloaded rarely so it is very bursty. | +| supervisor:blob-localization-duration | timer | Approximately how long it takes to get the blob we want after it is requested. | +| supervisor:current-reserved-memory-mb | gauge | total amount of memory reserved for workers on the supervisor (MB) | +| supervisor:current-used-memory-mb | gauge | memory currently used as measured by the supervisor (this typically requires cgroups) (MB) | +| supervisor:health-check-timeouts | meter | tracks timeouts executing health check scripts | +| supervisor:local-resource-file-not-found-when-releasing-slot | meter | number of times file-not-found exception happens when reading local blobs upon releasing slots | +| supervisor:num-blob-update-version-changed | meter | number of times a version of a blob changes. | +| supervisor:num-cleanup-exceptions | meter | exceptions thrown during container cleanup. | +| supervisor:num-force-kill-exceptions | meter | exceptions thrown during force kill. | +| supervisor:num-kill-exceptions | meter | exceptions thrown during kill. | +| supervisor:num-kill-worker-errors | meter | errors killing workers. | +| supervisor:num-launched | meter | number of times the supervisor is launched. | +| supervisor:num-shell-exceptions | meter | number of exceptions calling shell commands. | +| supervisor:num-slots-used-gauge | gauge | number of slots used on the supervisor. | +| supervisor:num-worker-start-timed-out | meter | number of times worker start timed out. | +| supervisor:num-worker-transitions-into-empty | meter | number of transitions into empty state. | +| supervisor:num-worker-transitions-into-kill | meter | number of transitions into kill state. | +| supervisor:num-worker-transitions-into-kill-and-relaunch | meter | number of transitions into kill-and-relaunch state | +| supervisor:num-worker-transitions-into-kill-blob-update | meter | number of transitions into kill-blob-update state | +| supervisor:num-worker-transitions-into-running | meter | number of transitions into running state | +| supervisor:num-worker-transitions-into-waiting-for-blob-localization | meter | number of transitions into waiting-for-blob-localization state | +| supervisor:num-worker-transitions-into-waiting-for-blob-update | meter | number of transitions into waiting-for-blob-update state | +| supervisor:num-worker-transitions-into-waiting-for-worker-start | meter | number of transitions into waiting-for-worker-start state | +| supervisor:num-workers-force-kill | meter | number of times a worker was force killed. This may mean that the worker did not exit cleanly/quickly. | +| supervisor:num-workers-killed-assignment-changed | meter | workers killed because the assignment changed. | +| supervisor:num-workers-killed-blob-changed | meter | workers killed because the blob changed and they needed to be relaunched. | +| supervisor:num-workers-killed-hb-null | meter | workers killed because there was no hb at all from the worker. This would typically only happen when a worker is launched for the first time. | +| supervisor:num-workers-killed-hb-timeout | meter | workers killed because the hb from the worker was too old. 
This often happens because of GC issues in the worker that prevent it from sending a heartbeat, but it could also mean the worker process exited without the supervisor being the parent of the process, so the supervisor cannot know that it exited. |
+| supervisor:num-workers-killed-memory-violation | meter | workers killed because the worker was using too much memory. If the supervisor can monitor the memory usage of the worker (typically through cgroups) and the worker goes over the limit, it may be shot. |
+| supervisor:num-workers-killed-process-exit | meter | workers killed because the process exited and the supervisor was the parent process |
+| supervisor:num-workers-launched | meter | number of workers launched |
+| supervisor:single-blob-localization-duration | timer | how long it takes for a blob to be updated (downloaded, unzipped, inform slots, and make the move) |
+| supervisor:time-worker-spent-in-state-empty-ms | timer | time spent in empty state as it transitions out. Not necessarily in ms. |
+| supervisor:time-worker-spent-in-state-kill-and-relaunch-ms | timer | time spent in kill-and-relaunch state as it transitions out. Not necessarily in ms. |
+| supervisor:time-worker-spent-in-state-kill-blob-update-ms | timer | time spent in kill-blob-update state as it transitions out. Not necessarily in ms. |
+| supervisor:time-worker-spent-in-state-kill-ms | timer | time spent in kill state as it transitions out. Not necessarily in ms. |
+| supervisor:time-worker-spent-in-state-running-ms | timer | time spent in running state as it transitions out. Not necessarily in ms. |
+| supervisor:time-worker-spent-in-state-waiting-for-blob-localization-ms | timer | time spent in waiting-for-blob-localization state as it transitions out. Not necessarily in ms. |
+| supervisor:time-worker-spent-in-state-waiting-for-blob-update-ms | timer | time spent in waiting-for-blob-update state as it transitions out. Not necessarily in ms. |
+| supervisor:time-worker-spent-in-state-waiting-for-worker-start-ms | timer | time spent in waiting-for-worker-start state as it transitions out. Not necessarily in ms. |
+| supervisor:update-blob-exceptions | meter | number of exceptions updating blobs. |
+| supervisor:worker-launch-duration | timer | Time taken for a worker to launch. |
+| supervisor:worker-per-call-clean-up-duration-ns | meter | how long it takes to cleanup a worker (ns). |
+| supervisor:worker-shutdown-duration-ns | meter | how long it takes to shutdown a worker (ns). |
+| supervisor:workerTokenAuthorizer-get-password-failures | meter | Failures getting password for user in WorkerTokenAuthorizer |
+
+
+## UI Metrics
+
+Metrics associated with a single UI daemon.
+
+| Metric Name | Type | Description |
+|-------------|------|-------------|
+| ui:num-activate-topology-http-requests | meter | calls to /topology/{id}/activate |
+| ui:num-all-topologies-summary-http-requests | meter | calls to /topology/summary |
+| ui:num-build-visualization-http-requests | meter | calls to /topology/{id}/visualization |
+| ui:num-cluster-configuration-http-requests | meter | calls to /cluster/configuration |
+| ui:num-cluster-summary-http-requests | meter | calls to /cluster/summary |
+| ui:num-component-op-response-http-requests | meter | calls to /topology/{id}/component/{component}/debug/{action}/{spct} |
+| ui:num-component-page-http-requests | meter | calls to /topology/{id}/component/{component} |
+| ui:num-deactivate-topology-http-requests | meter | calls to /topology/{id}/deactivate |
+| ui:num-debug-topology-http-requests | meter | calls to /topology/{id}/debug/{action}/{spct} |
+| ui:num-get-owner-resource-summaries-http-request | meter | calls to /owner-resources or /owner-resources/{id} |
+| ui:num-log-config-http-requests | meter | calls to /topology/{id}/logconfig |
+| ui:num-main-page-http-requests | meter | number of requests to /index.html |
+| ui:num-mk-visualization-data-http-requests | meter | calls to /topology/{id}/visualization-init |
+| ui:num-nimbus-summary-http-requests | meter | calls to /nimbus/summary |
+| ui:num-supervisor-http-requests | meter | calls to /supervisor |
+| ui:num-supervisor-summary-http-requests | meter | calls to /supervisor/summary |
+| ui:num-topology-lag-http-requests | meter | calls to /topology/{id}/lag |
+| ui:num-topology-metric-http-requests | meter | calls to /topology/{id}/metrics |
+| ui:num-topology-op-response-http-requests | meter | calls to /topology/{id}/logconfig or /topology/{id}/rebalance/{wait-time} or /topology/{id}/kill/{wait-time} |
+| ui:num-topology-page-http-requests | meter | calls to /topology/{id} |
+| num-web-requests | meter | nominally the total number of web requests being made. |
+
+## Pacemaker Metrics (Deprecated)
+
+The pacemaker process is deprecated and only still exists for backward compatibility.
+
+| Metric Name | Type | Description |
+|-------------|------|-------------|
+| pacemaker:get-pulse=count | meter | number of times getPulse was called. Yes, the '=' is in the name, but typically this is mapped to a '-' by the metrics reporters. |
+| pacemaker:heartbeat-size | histogram | size in bytes of heartbeats |
+| pacemaker:send-pulse-count | meter | number of times sendPulse was called |
+| pacemaker:size-total-keys | gauge | total number of keys in this pacemaker instance |
+| pacemaker:total-receive-size | meter | total size in bytes of heartbeats received |
+| pacemaker:total-sent-size | meter | total size in bytes of heartbeats sent |
+
+
+## Metric Reporters
+
+For metrics to be reported, configure reporters using `storm.daemon.metrics.reporter.plugins`. The following metric reporters are supported:
+ * Console Reporter (`org.apache.storm.daemon.metrics.reporters.ConsolePreparableReporter`):
+   Reports metrics to `System.out`.
+ * CSV Reporter (`org.apache.storm.daemon.metrics.reporters.CsvPreparableReporter`):
+   Reports metrics to a CSV file.
+ * JMX Reporter (`org.apache.storm.daemon.metrics.reporters.JmxPreparableReporter`):
+   Exposes metrics via JMX.
+
+A custom reporter can be created by implementing the `org.apache.storm.daemon.metrics.reporters.PreparableReporter` interface.
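+
+As a configuration sketch, enabling two of the built-in reporters might look like the following in `storm.yaml` (assuming the property takes a YAML list of reporter class names):
+
+```yaml
+# Hypothetical storm.yaml snippet: enable the console and JMX metric reporters.
+storm.daemon.metrics.reporter.plugins:
+  - "org.apache.storm.daemon.metrics.reporters.ConsolePreparableReporter"
+  - "org.apache.storm.daemon.metrics.reporters.JmxPreparableReporter"
+```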
diff --git a/docs/Command-line-client.md b/docs/Command-line-client.md
new file mode 100644
index 00000000000..352e1519e2a
--- /dev/null
+++ b/docs/Command-line-client.md
@@ -0,0 +1,358 @@
+---
+title: Command Line Client
+layout: documentation
+documentation: true
+---
+This page describes all the commands that are possible with the "storm" command line client. To learn how to set up your "storm" client to talk to a remote cluster, follow the instructions in [Setting up development environment](Setting-up-development-environment.html). See [Classpath handling](Classpath-handling.html) for details on using external libraries in these commands.
+
+These commands are:
+
+1. jar
+1. local
+1. sql
+1. kill
+1. activate
+1. deactivate
+1. rebalance
+1. repl
+1. classpath
+1. server_classpath
+1. localconfvalue
+1. remoteconfvalue
+1. nimbus
+1. supervisor
+1. ui
+1. drpc
+1. drpc-client
+1. blobstore
+1. dev-zookeeper
+1. get-errors
+1. heartbeats
+1. kill_workers
+1. list
+1. logviewer
+1. monitor
+1. node-health-check
+1. pacemaker
+1. set_log_level
+1. shell
+1. upload-credentials
+1. version
+1. admin
+1. help
+
+### jar
+
+Syntax: `storm jar topology-jar-path class ...`
+
+Runs the main method of `class` with the specified arguments. The storm jars and configs in `~/.storm` are put on the classpath. The process is configured so that [StormSubmitter](javadocs/org/apache/storm/StormSubmitter.html) will upload the jar at `topology-jar-path` when the topology is submitted.
+
+When you want to ship other jars that are not included in the application jar, you can pass them to the `--jars` option as a comma-separated string.
+For example, `--jars "your-local-jar.jar,your-local-jar2.jar"` will load your-local-jar.jar and your-local-jar2.jar.
+When you want to ship maven artifacts and their transitive dependencies, you can pass them to the `--artifacts` option as a comma-separated string. You can also exclude some dependencies, as you would in a Maven POM: append the exclusions to the artifact as a '^'-separated string. For example, `--artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka_2.10:0.8.2.2^org.slf4j:slf4j-log4j12"` will load the jedis and kafka artifacts and all of their transitive dependencies, but exclude slf4j-log4j12 from kafka.
+
+When you need to pull artifacts from somewhere other than Maven Central, you can pass remote repositories to the `--artifactRepositories` option as a comma-separated string. The repository format is "name^url"; '^' is taken as the separator because URLs allow various characters. For example, `--artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"` will add the JBoss and HDP repositories for dependency resolution.
+
+A complete example of both options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
+
+When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process that runs the class and the workers for that topology.
+
+### local
+
+Syntax: `storm local topology-jar-path class ...`
+
+The local command acts just like `storm jar`, except that instead of submitting a topology to a cluster it runs the cluster in local mode. This means an embedded version of the storm daemons will be run within the same process as your topology for 30 seconds before it shuts down automatically. As such, the classpath of your topology will be extended to include everything needed to run those daemons.
+
+### sql
+
+Syntax: `storm sql sql-file topology-name`
+
+Compiles the SQL statements into a Trident topology and submits it to Storm.
+
+The `--jars`, `--artifacts`, and `--artifactRepositories` options available for the jar command also apply to the sql command. See `storm help jar` for how to use these options. You will normally want to pass them, since the data sources referenced in your SQL are in many cases external storage systems.
+
+### kill
+
+Syntax: `storm kill topology-name [-w wait-time-secs]`
+
+Kills the topology with the name `topology-name`. Storm will first deactivate the topology's spouts for the duration of the topology's message timeout to allow all messages currently being processed to finish processing. Storm will then shut down the workers and clean up their state. You can override the length of time Storm waits between deactivation and shutdown with the -w flag.
+
+### activate
+
+Syntax: `storm activate topology-name`
+
+Activates the specified topology's spouts.
+
+### deactivate
+
+Syntax: `storm deactivate topology-name`
+
+Deactivates the specified topology's spouts.
+
+### rebalance
+
+Syntax: `storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]*`
+
+Sometimes you may wish to spread out where the workers for a topology are running. For example, let's say you have a 10 node cluster running 4 workers per node, and then let's say you add another 10 nodes to the cluster. You may wish to have Storm spread out the workers for the running topology so that each node runs 2 workers. One way to do this is to kill the topology and resubmit it, but Storm's "rebalance" command provides an easier way.
+
+Rebalance will first deactivate the topology for the duration of the message timeout (overridable with the -w flag) and then redistribute the workers evenly around the cluster. The topology will then return to its previous state of activation (so a deactivated topology will still be deactivated and an activated topology will go back to being activated).
+
+The rebalance command can also be used to change the parallelism of a running topology. Use the -n and -e switches to change the number of workers or the number of executors of a component, respectively.
+
+### repl
+
+*DEPRECATED: This subcommand may be removed in a future release.*
+
+Syntax: `storm repl`
+
+Opens up a Clojure REPL with the storm jars and configuration on the classpath. Useful for debugging.
+
+### classpath
+
+Syntax: `storm classpath`
+
+Prints the classpath used by the storm client when running commands.
+
+### server_classpath
+
+Syntax: `storm server_classpath`
+
+Prints the classpath used by the storm daemons.
+
+### localconfvalue
+
+Syntax: `storm localconfvalue conf-name`
+
+Prints out the value for `conf-name` in the local Storm configs. The local Storm configs are the ones in `~/.storm/storm.yaml` merged in with the configs in `defaults.yaml`.
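+
+For example, `storm localconfvalue nimbus.seeds` would print the locally configured value of `nimbus.seeds` (used here purely as an illustrative config key).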
+
+### remoteconfvalue
+
+Syntax: `storm remoteconfvalue conf-name`
+
+Prints out the value for `conf-name` in the cluster's Storm configs. The cluster's Storm configs are the ones in `$STORM-PATH/conf/storm.yaml` merged in with the configs in `defaults.yaml`. This command must be run on a cluster machine.
+
+### nimbus
+
+Syntax: `storm nimbus`
+
+Launches the nimbus daemon. This command should be run under supervision with a tool like [daemontools](http://cr.yp.to/daemontools.html) or [monit](http://mmonit.com/monit/). See [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html) for more information.
+
+### supervisor
+
+Syntax: `storm supervisor`
+
+Launches the supervisor daemon. This command should be run under supervision with a tool like [daemontools](http://cr.yp.to/daemontools.html) or [monit](http://mmonit.com/monit/). See [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html) for more information.
+
+### ui
+
+Syntax: `storm ui`
+
+Launches the UI daemon. The UI provides a web interface for a Storm cluster and shows detailed stats about running topologies. This command should be run under supervision with a tool like [daemontools](http://cr.yp.to/daemontools.html) or [monit](http://mmonit.com/monit/). See [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html) for more information.
+
+### drpc
+
+Syntax: `storm drpc`
+
+Launches a DRPC daemon. This command should be run under supervision with a tool like [daemontools](http://cr.yp.to/daemontools.html) or [monit](http://mmonit.com/monit/). See [Distributed RPC](Distributed-RPC.html) for more information.
+
+### drpc-client
+
+Syntax: `storm drpc-client [options] ([function argument]*)|(argument*)`
+
+Provides a very simple way to send DRPC requests. If a `-f` argument is supplied to set the function name, all of the arguments are treated as arguments to the function. If no function is given, the arguments must be pairs of function and argument.
+
+*NOTE:* This is not really intended for production use. This is mostly because parsing out the results can be a pain.
+
+Creating an actual DRPC client only takes a few lines, so for production please go with that.
+
+```java
+Config conf = new Config();
+try (DRPCClient drpc = DRPCClient.getConfiguredClient(conf)) {
+    // Use the DRPC client
+    String result = drpc.execute(function, argument);
+}
+```
+
+#### Examples
+
+`storm drpc-client exclaim a exclaim b test bar`
+
+This will submit 3 separate DRPC requests.
+1. function = "exclaim" args = "a"
+2. function = "exclaim" args = "b"
+3. function = "test" args = "bar"
+
+`storm drpc-client -f exclaim a b`
+
+This will submit 2 separate DRPC requests.
+1. function = "exclaim" args = "a"
+2. function = "exclaim" args = "b"
+
+### blobstore
+
+Syntax: `storm blobstore cmd`
+
+list [KEY...] - lists blobs currently in the blob store.
+
+cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
+
+create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. The contents come from a FILE or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
+
+update [-f FILE] KEY - update the contents of a blob. The contents come from a FILE or STDIN (requires write access).
+
+delete KEY - delete an entry from the blob store (requires write access).
+
+set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list (requires admin access).
+
+replication --read KEY - Used to read the replication factor of the blob.
+
+replication --update --replication-factor NUMBER KEY, where NUMBER > 0 - used to update the replication factor of a blob.
+
+For example, the following would create a mytopo:data.tgz key using the data stored in data.tgz. User alice would have full access, bob would have read/write access, and everyone else would have read access.
+
+storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
+
+See [Blobstore (Distcache)](distcache-blobstore.html) for more information.
+
+### dev-zookeeper
+
+Syntax: `storm dev-zookeeper`
+
+Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and "storm.zookeeper.port" as its port. This is only intended for development/testing; the Zookeeper instance launched is not configured to be used in production.
+
+### get-errors
+
+Syntax: `storm get-errors topology-name`
+
+Gets the latest errors from the running topology. The returned result contains key-value pairs of component-name and component-error for the components in error. The result is returned in JSON format.
+
+### heartbeats
+
+Syntax: `storm heartbeats [cmd]`
+
+list PATH - lists heartbeat nodes under PATH currently in the ClusterState.
+get PATH - gets the heartbeat data at PATH.
+
+### kill_workers
+
+Syntax: `storm kill_workers`
+
+Kills the workers running on this supervisor. This command should be run on a supervisor node. If the cluster is running in secure mode, the user needs to have admin rights on the node to be able to successfully kill all workers.
+
+### list
+
+Syntax: `storm list`
+
+Lists the running topologies and their statuses.
+
+### logviewer
+
+Syntax: `storm logviewer`
+
+Launches the log viewer daemon. It provides a web interface for viewing storm log files. This command should be run under supervision with a tool like daemontools or monit.
+
+See [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html) for more information.
+
+### monitor
+
+Syntax: `storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]`
+
+Monitors the given topology's throughput interactively. One can specify the poll interval, component id, stream id, and watch item (emitted | transferred). By default:
+* poll-interval is 4 seconds;
+* all component-ids will be listed;
+* stream-id is 'default';
+* watch-item is 'emitted'.
+
+### node-health-check
+
+Syntax: `storm node-health-check`
+
+Runs health checks on the local supervisor.
+
+### pacemaker
+
+Syntax: `storm pacemaker`
+
+Launches the Pacemaker daemon. This command should be run under supervision with a tool like daemontools or monit.
+
+See [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html) for more information.
+
+### set_log_level
+
+Syntax: `storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name`
+
+Dynamically changes topology log levels, where the log level is one of: ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF, and the timeout is in integer seconds.
+
+e.g.
+    ./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
+
+    Sets the root logger's level to DEBUG for 30 seconds
+
+    ./bin/storm set_log_level -l com.myapp=WARN topology-name
+
+    Sets the com.myapp logger's level to WARN indefinitely (no timeout given)
+
+    ./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
+
+    Sets the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger to ERROR for 123 seconds
+
+    ./bin/storm set_log_level -r com.myOtherLogger topology-name
+
+    Clears the com.myOtherLogger setting, resetting it back to its original level
+
+### shell
+
+Syntax: `storm shell resourcesdir command args`
+
+Constructs a jar from the resources directory and uploads it to Nimbus, making it easy to use non-JVM languages.
+
+e.g.: `storm shell resources/ python3 topology.py arg1 arg2`
+
+### upload_credentials
+
+Syntax: `storm upload_credentials topology-name [credkey credvalue]*`
+
+Uploads a new set of credentials to a running topology.
+ * `-e --exception-when-empty`: optional flag. If set, the command will fail and throw an exception if no credentials were uploaded.
+
+
+### version
+
+Syntax: `storm version`
+
+Prints the version number of this Storm release.
+
+### admin
+
+Syntax: `storm admin [options]`
+
+The `storm admin` command provides access to several operations that can help an administrator debug or fix a cluster.
+
+`remove_corrupt_topologies` - This command should be run on a nimbus node as the same user nimbus runs as. It will go directly to zookeeper + blobstore and find topologies that appear to be corrupted because of missing blobs. It will kill those topologies.
+
+ `zk_cli [options]` - This command will launch a zookeeper cli pointing to the storm zookeeper instance logged in as the nimbus user. It should be run on a nimbus server as the user nimbus runs as.
+
+ * `-s --server <connection string>`: Set the connection string to use,
+    defaults to the storm connection string.
+ * `-t --time-out <timeout>`: Set the timeout to use, defaults to the storm
+    zookeeper timeout.
+ * `-w --write`: Allow for writes; defaults to read only, as we don't want to
+    cause problems.
+ * `-n --no-root`: Don't include the storm root on the default connection string.
+ * `-j --jaas <jaas file>`: Include a jaas file that should be used when
+    authenticating with ZK; defaults to the
+    java.security.auth.login.config conf.
+
+`creds topology_id` - Print the credential keys for a topology.
+
+### help
+Syntax: `storm help [command]`
+
+Prints a help message for the given command, or lists all available commands.
diff --git a/docs/Common-patterns.md b/docs/Common-patterns.md
new file mode 100644
index 00000000000..92c8c007537
--- /dev/null
+++ b/docs/Common-patterns.md
@@ -0,0 +1,84 @@
+---
+title: Common Topology Patterns
+layout: documentation
+documentation: true
+---
+
+This page lists a variety of common patterns in Storm topologies.
+
+1. Batching
+2. BasicBolt
+3. In-memory caching + fields grouping combo
+4. Streaming top N
+5. TimeCacheMap for efficiently keeping a cache of things that have been recently updated
+6. CoordinatedBolt and KeyedFairBolt for Distributed RPC
+
+
+### Batching
+
+Oftentimes for efficiency reasons or otherwise, you want to process a group of tuples in batch rather than individually. For example, you may want to batch updates to a database or do a streaming aggregation of some sort.
+
+If you want reliability in your data processing, the right way to do this is to hold on to tuples in an instance variable while the bolt waits to do the batching. Once you do the batch operation, you then ack all the tuples you were holding onto.
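+
+As a rough sketch of this pattern (the class name, batch size, and `flush` helper here are illustrative, not part of Storm's API), a batching bolt might look like this:
+
+```java
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Tuple;
+
+public class BatchingBolt extends BaseRichBolt {
+    private static final int BATCH_SIZE = 100; // illustrative threshold
+    private OutputCollector collector;
+    private final List<Tuple> pending = new ArrayList<>();
+
+    @Override
+    public void prepare(Map<String, Object> conf, TopologyContext context, OutputCollector collector) {
+        this.collector = collector;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+        pending.add(tuple); // hold on to the tuple; do not ack yet
+        if (pending.size() >= BATCH_SIZE) {
+            flush(pending); // e.g. one batched database update
+            for (Tuple t : pending) {
+                collector.ack(t); // ack only after the batch operation succeeds
+            }
+            pending.clear();
+        }
+    }
+
+    private void flush(List<Tuple> batch) {
+        // perform the batch operation (database write, aggregation, ...)
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        // this sketch emits no tuples
+    }
+}
+```
+
+A real implementation would typically also flush on a timer (e.g. on tick tuples) so that a partial batch doesn't wait forever and tuples don't hit the topology's message timeout.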
+
+If the bolt emits tuples, then you may want to use multi-anchoring to ensure reliability. It all depends on the specific application. See [Guaranteeing message processing](Guaranteeing-message-processing.html) for more details on how reliability works.
+
+### BasicBolt
+Many bolts follow a similar pattern of reading an input tuple, emitting zero or more tuples based on that input tuple, and then acking that input tuple immediately at the end of the execute method. Bolts that match this pattern are things like functions and filters. This is such a common pattern that Storm exposes an interface called [IBasicBolt](javadocs/org/apache/storm/topology/IBasicBolt.html) that automates this pattern for you. See [Guaranteeing message processing](Guaranteeing-message-processing.html) for more information.
+
+### In-memory caching + fields grouping combo
+
+It's common to keep caches in-memory in Storm bolts. Caching becomes particularly powerful when you combine it with a fields grouping. For example, suppose you have a bolt that expands short URLs (like bit.ly, t.co, etc.) into long URLs. You can increase performance by keeping an LRU cache of short URL to long URL expansions to avoid doing the same HTTP requests over and over. Suppose component "urls" emits short URLs, and component "expand" expands short URLs into long URLs and keeps a cache internally. Consider the difference between the two following snippets of code:
+
+```java
+builder.setBolt("expand", new ExpandUrl(), parallelism)
+        .shuffleGrouping("urls");
+```
+
+```java
+builder.setBolt("expand", new ExpandUrl(), parallelism)
+        .fieldsGrouping("urls", new Fields("url"));
+```
+
+The second approach will have vastly more effective caches, since the same URL will always go to the same task. This avoids having duplication across any of the caches in the tasks and makes it much more likely that a short URL will hit the cache.
+
+### Streaming top N
+
+A common continuous computation done on Storm is a "streaming top N" of some sort. Suppose you have a bolt that emits tuples of the form ["value", "count"] and you want a bolt that emits the top N tuples based on the count. The simplest way to do this is to have a bolt that does a global grouping on the stream and maintains a list in memory of the top N items.
+
+This approach obviously doesn't scale to large streams, since the entire stream has to go through one task. A better way to do the computation is to do many top N's in parallel across partitions of the stream, and then merge those top N's together to get the global top N. The pattern looks like this:
+
+```java
+builder.setBolt("rank", new RankObjects(), parallelism)
+        .fieldsGrouping("objects", new Fields("value"));
+builder.setBolt("merge", new MergeObjects())
+        .globalGrouping("rank");
+```
+
+This pattern works because of the fields grouping done by the first bolt, which gives the partitioning you need for this to be semantically correct. You can see an example of this pattern in storm-starter [here]({{page.git-blob-base}}/examples/storm-starter/src/jvm/org/apache/storm/starter/RollingTopWords.java).
+
+If, however, you have a known skew in the data being processed, it can be advantageous to use partialKeyGrouping instead of fieldsGrouping. This will distribute the load for each key between two downstream bolts instead of a single one.
+
+```java
+builder.setBolt("count", new CountObjects(), parallelism)
+        .partialKeyGrouping("objects", new Fields("value"));
+builder.setBolt("rank", new AggregateCountsAndRank(), parallelism)
+        .fieldsGrouping("count", new Fields("key"));
+builder.setBolt("merge", new MergeRanksObjects())
+        .globalGrouping("rank");
+```
+
+The topology needs an extra layer of processing to aggregate the partial counts from the upstream bolts, but because this layer only processes aggregated values, the bolt is not subject to the load caused by the skewed data. You can see an example of this pattern in storm-starter [here]({{page.git-blob-base}}/examples/storm-starter/src/jvm/org/apache/storm/starter/SkewedRollingTopWords.java).
+
+### TimeCacheMap for efficiently keeping a cache of things that have been recently updated
+
+You sometimes want to keep a cache in memory of items that have been recently "active" and have items that have been inactive for some time automatically expire. [TimeCacheMap](javadocs/org/apache/storm/utils/TimeCacheMap.html) is an efficient data structure for doing this and provides hooks so you can insert callbacks whenever an item is expired.
+
+### CoordinatedBolt and KeyedFairBolt for Distributed RPC
+
+When building distributed RPC applications on top of Storm, there are two common patterns that are usually needed. These are encapsulated by [CoordinatedBolt](javadocs/org/apache/storm/task/CoordinatedBolt.html) and [KeyedFairBolt](javadocs/org/apache/storm/task/KeyedFairBolt.html), which are part of the "standard library" that ships with the Storm codebase.
+
+`CoordinatedBolt` wraps the bolt containing your logic and figures out when your bolt has received all the tuples for any given request. It makes heavy use of direct streams to do this.
+
+`KeyedFairBolt` also wraps the bolt containing your logic and makes sure your topology processes multiple DRPC invocations at the same time, instead of doing them serially one at a time.
+
+See [Distributed RPC](Distributed-RPC.html) for more details.
diff --git a/docs/Concepts.md b/docs/Concepts.md
new file mode 100644
index 00000000000..fd8fa88c1df
--- /dev/null
+++ b/docs/Concepts.md
@@ -0,0 +1,120 @@
+---
+title: Concepts
+layout: documentation
+documentation: true
+---
+
+This page lists the main concepts of Storm and links to resources where you can find more information. The concepts discussed are:
+
+1. Topologies
+2. Streams
+3. Spouts
+4. Bolts
+5. Stream groupings
+6. Reliability
+7. Tasks
+8. Workers
+
+### Topologies
+
+The logic for a realtime application is packaged into a Storm topology. A Storm topology is analogous to a MapReduce job. One key difference is that a MapReduce job eventually finishes, whereas a topology runs forever (or until you kill it, of course). A topology is a graph of spouts and bolts that are connected with stream groupings. These concepts are described below.
+
+**Resources:**
+
+* [TopologyBuilder](javadocs/org/apache/storm/topology/TopologyBuilder.html): use this class to construct topologies in Java
+* [Running topologies on a production cluster](Running-topologies-on-a-production-cluster.html)
+* [Local mode](Local-mode.html): Read this to learn how to develop and test topologies in local mode.
+
+### Streams
+
+The stream is the core abstraction in Storm. A stream is an unbounded sequence of tuples that is processed and created in parallel in a distributed fashion. Streams are defined with a schema that names the fields in the stream's tuples.
+By default, tuples can contain integers, longs, shorts, bytes, strings, doubles, floats, booleans, and byte arrays. You can also define your own serializers so that custom types can be used natively within tuples.
+
+Every stream is given an id when declared. Since single-stream spouts and bolts are so common, [OutputFieldsDeclarer](javadocs/org/apache/storm/topology/OutputFieldsDeclarer.html) has convenience methods for declaring a single stream without specifying an id. In this case, the stream is given the default id of "default".
+
+
+**Resources:**
+
+* [Tuple](javadocs/org/apache/storm/tuple/Tuple.html): streams are composed of tuples
+* [OutputFieldsDeclarer](javadocs/org/apache/storm/topology/OutputFieldsDeclarer.html): used to declare streams and their schemas
+* [Serialization](Serialization.html): Information about Storm's dynamic typing of tuples and declaring custom serializations
+
+### Spouts
+
+A spout is a source of streams in a topology. Generally spouts will read tuples from an external source (e.g. a Kestrel queue or the Twitter API) and emit them into the topology. Spouts can either be __reliable__ or __unreliable__. A reliable spout is capable of replaying a tuple if it failed to be processed by Storm, whereas an unreliable spout forgets about the tuple as soon as it is emitted.
+
+Spouts can emit more than one stream. To do so, declare multiple streams using the `declareStream` method of [OutputFieldsDeclarer](javadocs/org/apache/storm/topology/OutputFieldsDeclarer.html) and specify the stream to emit to when using the `emit` method on [SpoutOutputCollector](javadocs/org/apache/storm/spout/SpoutOutputCollector.html).
+
+The main method on spouts is `nextTuple`. `nextTuple` either emits a new tuple into the topology or simply returns if there are no new tuples to emit. It is imperative that `nextTuple` does not block for any spout implementation, because Storm calls all the spout methods on the same thread.
+
+The other main methods on spouts are `ack` and `fail`. These are called when Storm detects that a tuple emitted from the spout either successfully completed through the topology or failed to be completed. `ack` and `fail` are only called for reliable spouts. See [the Javadoc](javadocs/org/apache/storm/spout/ISpout.html) for more information.
+
+**Resources:**
+
+* [IRichSpout](javadocs/org/apache/storm/topology/IRichSpout.html): this is the interface that spouts must implement.
+* [Guaranteeing message processing](Guaranteeing-message-processing.html)
+
+### Bolts
+
+All processing in topologies is done in bolts. Bolts can do anything: filtering, functions, aggregations, joins, talking to databases, and more.
+
+Bolts can do simple stream transformations. Doing complex stream transformations often requires multiple steps and thus multiple bolts. For example, transforming a stream of tweets into a stream of trending images requires at least two steps: a bolt to do a rolling count of retweets for each image, and one or more bolts to stream out the top X images (you can do this particular stream transformation in a more scalable way with three bolts than with two).
+
+Bolts can emit more than one stream. To do so, declare multiple streams using the `declareStream` method of [OutputFieldsDeclarer](javadocs/org/apache/storm/topology/OutputFieldsDeclarer.html) and specify the stream to emit to when using the `emit` method on [OutputCollector](javadocs/org/apache/storm/task/OutputCollector.html).
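+
+As a minimal, hedged sketch of declaring and emitting to multiple streams (the "errors" stream, field names, and `isValid` check are illustrative):
+
+```java
+import java.util.Map;
+
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+
+public class ValidatingBolt extends BaseRichBolt {
+    private OutputCollector collector;
+
+    @Override
+    public void prepare(Map<String, Object> conf, TopologyContext context, OutputCollector collector) {
+        this.collector = collector;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+        if (isValid(tuple)) {
+            collector.emit(tuple, new Values(tuple.getValue(0)));           // default stream
+        } else {
+            collector.emit("errors", tuple, new Values(tuple.getValue(0))); // named stream
+        }
+        collector.ack(tuple);
+    }
+
+    private boolean isValid(Tuple tuple) {
+        return tuple.getValue(0) != null; // placeholder validation
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("value"));                 // default stream
+        declarer.declareStream("errors", new Fields("value")); // the named "errors" stream
+    }
+}
+```
+
+A downstream bolt can then subscribe to just the named stream, e.g. `builder.setBolt("handle-errors", new ErrorBolt()).shuffleGrouping("validate", "errors")` (component names again illustrative).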
+
+When you declare a bolt's input streams, you always subscribe to specific streams of another component. If you want to subscribe to all the streams of another component, you have to subscribe to each one individually. [InputDeclarer](javadocs/org/apache/storm/topology/InputDeclarer.html) has syntactic sugar for subscribing to streams declared on the default stream id. Saying `declarer.shuffleGrouping("1")` subscribes to the default stream on component "1" and is equivalent to `declarer.shuffleGrouping("1", DEFAULT_STREAM_ID)`.
+
+The main method in bolts is the `execute` method, which takes in as input a new tuple. Bolts emit new tuples using the [OutputCollector](javadocs/org/apache/storm/task/OutputCollector.html) object. Bolts must call the `ack` method on the `OutputCollector` for every tuple they process so that Storm knows when tuples are completed (and can eventually determine that it's safe to ack the original spout tuples). For the common case of processing an input tuple, emitting 0 or more tuples based on that tuple, and then acking the input tuple, Storm provides an [IBasicBolt](javadocs/org/apache/storm/topology/IBasicBolt.html) interface which does the acking automatically.
+
+It's perfectly fine to launch new threads in bolts that do processing asynchronously. [OutputCollector](javadocs/org/apache/storm/task/OutputCollector.html) is thread-safe and can be called at any time.
+
+**Resources:**
+
+* [IRichBolt](javadocs/org/apache/storm/topology/IRichBolt.html): this is the general interface for bolts.
+* [IBasicBolt](javadocs/org/apache/storm/topology/IBasicBolt.html): this is a convenience interface for defining bolts that do filtering or simple functions.
+* [OutputCollector](javadocs/org/apache/storm/task/OutputCollector.html): bolts emit tuples to their output streams using an instance of this class
+* [Guaranteeing message processing](Guaranteeing-message-processing.html)
+
+### Stream groupings
+
+Part of defining a topology is specifying for each bolt which streams it should receive as input. A stream grouping defines how that stream should be partitioned among the bolt's tasks.
+
+There are eight built-in stream groupings in Storm, and you can implement a custom stream grouping by implementing the [CustomStreamGrouping](javadocs/org/apache/storm/grouping/CustomStreamGrouping.html) interface:
+
+1. **Shuffle grouping**: Tuples are randomly distributed across the bolt's tasks in a way such that each task is guaranteed to get an equal number of tuples.
+2. **Fields grouping**: The stream is partitioned by the fields specified in the grouping. For example, if the stream is grouped by the "user-id" field, tuples with the same "user-id" will always go to the same task, but tuples with different "user-id"'s may go to different tasks.
+3. **Partial Key grouping**: The stream is partitioned by the fields specified in the grouping, like the Fields grouping, but the load is balanced between two downstream bolts, which provides better utilization of resources when the incoming data is skewed. [This paper](https://melmeric.files.wordpress.com/2014/11/the-power-of-both-choices-practical-load-balancing-for-distributed-stream-processing-engines.pdf) provides a good explanation of how it works and the advantages it provides.
+4. **All grouping**: The stream is replicated across all the bolt's tasks. Use this grouping with care.
+5. **Global grouping**: The entire stream goes to a single one of the bolt's tasks. Specifically, it goes to the task with the lowest id.
+6. **None grouping**: This grouping specifies that you don't care how the stream is grouped. Currently, none groupings are equivalent to shuffle groupings. Eventually though, Storm will push down bolts with none groupings to execute in the same thread as the bolt or spout they subscribe from (when possible).
+7. **Direct grouping**: This is a special kind of grouping. A stream grouped this way means that the __producer__ of the tuple decides which task of the consumer will receive this tuple. Direct groupings can only be declared on streams that have been declared as direct streams. Tuples emitted to a direct stream must be emitted using one of the [emitDirect](javadocs/org/apache/storm/task/OutputCollector.html#emitDirect-int-java.util.Collection-java.util.List-) methods. A bolt can get the task ids of its consumers by either using the provided [TopologyContext](javadocs/org/apache/storm/task/TopologyContext.html) or by keeping track of the output of the `emit` method in [OutputCollector](javadocs/org/apache/storm/task/OutputCollector.html) (which returns the task ids that the tuple was sent to).
+8. **Local or shuffle grouping**: If the target bolt has one or more tasks in the same worker process, tuples will be shuffled to just those in-process tasks. Otherwise, this acts like a normal shuffle grouping.
+
+**Resources:**
+
+* [TopologyBuilder](javadocs/org/apache/storm/topology/TopologyBuilder.html): use this class to define topologies
+* [InputDeclarer](javadocs/org/apache/storm/topology/InputDeclarer.html): this object is returned whenever `setBolt` is called on `TopologyBuilder` and is used for declaring a bolt's input streams and how those streams should be grouped
+
+### Reliability
+
+Storm guarantees that every spout tuple will be fully processed by the topology. It does this by tracking the tree of tuples triggered by every spout tuple and determining when that tree of tuples has been successfully completed. Every topology has a "message timeout" associated with it. If Storm fails to detect that a spout tuple has been completed within that timeout, then it fails the tuple and replays it later.
+
+To take advantage of Storm's reliability capabilities, you must tell Storm when new edges in a tuple tree are being created and tell Storm whenever you've finished processing an individual tuple. These are done using the [OutputCollector](javadocs/org/apache/storm/task/OutputCollector.html) object that bolts use to emit tuples. Anchoring is done in the `emit` method, and you declare that you're finished with a tuple using the `ack` method.
+
+This is all explained in much more detail in [Guaranteeing message processing](Guaranteeing-message-processing.html).
+
+### Tasks
+
+Each spout or bolt executes as some number of tasks across the cluster. Each task corresponds to one thread of execution, and stream groupings define how to send tuples from one set of tasks to another set of tasks. You set the parallelism for each spout or bolt in the `setSpout` and `setBolt` methods of [TopologyBuilder](javadocs/org/apache/storm/topology/TopologyBuilder.html).
+
+### Workers
+
+Topologies execute across one or more worker processes. Each worker process is a physical JVM and executes a subset of all the tasks for the topology. For example, if the combined parallelism of the topology is 300 and 50 workers are allocated, then each worker will execute 6 tasks (as threads within the worker). Storm tries to spread the tasks evenly across all the workers.
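+
+As a hedged illustration of these knobs (the spout and bolt classes are made up), the parallelism hints in `setSpout`/`setBolt` control how many tasks each component runs, and `setNumWorkers` controls how many worker processes they are spread across:
+
+```java
+import org.apache.storm.Config;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+
+TopologyBuilder builder = new TopologyBuilder();
+builder.setSpout("sentences", new SentenceSpout(), 4);  // 4 tasks
+builder.setBolt("split", new SplitBolt(), 8)            // 8 tasks
+       .shuffleGrouping("sentences");
+builder.setBolt("count", new CountBolt(), 12)           // 12 tasks
+       .fieldsGrouping("split", new Fields("word"));
+
+Config conf = new Config();
+conf.setNumWorkers(6); // 24 tasks spread across 6 worker JVMs, about 4 per worker
+```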
+
+**Resources:**
+
+* [Config.TOPOLOGY_WORKERS](javadocs/org/apache/storm/Config.html#TOPOLOGY_WORKERS): this config sets the number of workers to allocate for executing the topology
+
+### Performance Tuning
+
+Refer to [performance tuning guide](Performance.html).
+
diff --git a/docs/Configuration.md b/docs/Configuration.md
new file mode 100644
index 00000000000..5b980d07cee
--- /dev/null
+++ b/docs/Configuration.md
@@ -0,0 +1,41 @@
+---
+title: Configuration
+layout: documentation
+documentation: true
+---
+Storm has a variety of configurations for tweaking the behavior of nimbus, supervisors, and running topologies. Some configurations are system configurations and cannot be modified on a topology-by-topology basis, whereas other configurations can be modified per topology.
+
+Every configuration has a default value defined in [defaults.yaml]({{page.git-blob-base}}/conf/defaults.yaml) in the Storm codebase. You can override these configurations by defining a storm.yaml in the classpath of Nimbus and the supervisors. Finally, you can define a topology-specific configuration that you submit along with your topology when using [StormSubmitter](javadocs/org/apache/storm/StormSubmitter.html). However, the topology-specific configuration can only override configs prefixed with "TOPOLOGY".
+
+Storm 0.7.0 and onwards lets you override configuration on a per-bolt/per-spout basis. The only configurations that can be overridden this way are:
+
+1. "topology.debug"
+2. "topology.max.spout.pending"
+3. "topology.max.task.parallelism"
+4. "topology.kryo.register": This works a little bit differently than the other ones, since the serializations will be available to all components in the topology. More details on [Serialization](Serialization.html).
+
+The Java API lets you specify component-specific configurations in two ways:
+
+1. *Internally:* Override `getComponentConfiguration` in any spout or bolt and return the component-specific configuration map.
+2. *Externally:* `setSpout` and `setBolt` in `TopologyBuilder` return an object with methods `addConfiguration` and `addConfigurations` that can be used to override the configurations for the component.
+
+The preference order for configuration values is defaults.yaml < storm.yaml < topology-specific configuration < internal component-specific configuration < external component-specific configuration.
+
+# Bolts, Spouts, and Plugins
+In almost all cases configuration for a bolt or a spout should be done through setters on the bolt or spout implementation and not the topology conf. In some rare cases, it may make sense to
+expose topology-wide configurations that are not currently a part of [Config](javadocs/org/apache/storm/Config.html) or [DaemonConfig](javadocs/org/apache/storm/DaemonConfig.html), such as
+when writing a custom scheduler or a plugin to some part of Storm. In those
+cases you can create your own class that, like Config, implements [Validated](javadocs/org/apache/storm/validation/Validated.html). Any `public static final String` field declared in this
+class will be treated as a config, and annotations from the `org.apache.storm.validation.ConfigValidationAnnotations` class can be used to enforce what is stored in that config.
+To let the validator know about this class, you need to treat the class
+like a service that will be loaded through a ServiceLoader for the Validated class and include a `META-INF/services/org.apache.storm.validation.Validated` file in your jar that holds
+the name of your Config class.
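+
+A minimal sketch of such a plugin config class (the package, class, and config names here are hypothetical):
+
+```java
+package org.mycompany;
+
+import org.apache.storm.validation.ConfigValidationAnnotations.IsPositiveNumber;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsString;
+import org.apache.storm.validation.Validated;
+
+public class MyPluginConfig implements Validated {
+
+    // Hypothetical config key, validated as a string.
+    @IsString
+    public static final String MY_SCHEDULER_TIER = "my.scheduler.tier";
+
+    // Hypothetical config key, validated as a positive number.
+    @IsPositiveNumber
+    public static final String MY_SCHEDULER_MAX_WORKERS_PER_HOST = "my.scheduler.max.workers.per.host";
+}
+```
+
+The accompanying `META-INF/services/org.apache.storm.validation.Validated` file would then contain the single line `org.mycompany.MyPluginConfig`.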
+
+**Resources:**
+
+* [Config](javadocs/org/apache/storm/Config.html): a listing of client configurations as well as a helper class for creating topology specific configurations
+* [DaemonConfig](javadocs/org/apache/storm/DaemonConfig.html): a listing of Storm Daemon configurations.
+* [defaults.yaml]({{page.git-blob-base}}/conf/defaults.yaml): the default values for all configurations
+* [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html): explains how to create and configure a Storm cluster
+* [Running topologies on a production cluster](Running-topologies-on-a-production-cluster.html): lists useful configurations when running topologies on a cluster
+* [Local mode](Local-mode.html): lists useful configurations when using local mode
diff --git a/docs/Contributing-to-Storm.md b/docs/Contributing-to-Storm.md
new file mode 100644
index 00000000000..2c6ad1f2d9a
--- /dev/null
+++ b/docs/Contributing-to-Storm.md
@@ -0,0 +1,33 @@
+---
+title: Contributing
+layout: documentation
+documentation: true
+---
+
+### Getting started with contributing
+
+Some of the issues on the [issue tracker](https://issues.apache.org/jira/browse/STORM) are marked with the ["Newbie"](https://issues.apache.org/jira/browse/STORM-2891?jql=project%20%3D%20STORM%20AND%20status%20%3D%20Open%20AND%20labels%20in%20(newbie%2C%20%22newbie%2B%2B%22)) label. If you're interested in contributing to Storm but don't know where to begin, these are good issues to start with. These issues are a great way to get your feet wet with the codebase, because they require learning about only an isolated portion of it and are a relatively small amount of work.
+
+### Learning the codebase
+
+The [Implementation docs](Implementation-docs.html) section of the wiki gives detailed walkthroughs of the codebase. Reading through these docs is highly recommended to understand the codebase.
+
+### Contribution process
+
+Contributions to the Storm codebase should be sent as [GitHub](https://github.com/apache/storm) pull requests. If there are any problems with the pull request, we can iterate on it using GitHub's commenting features.
+
+For small patches, feel free to submit pull requests directly for them. For larger contributions, please use the following process. The idea behind this process is to prevent any wasted work and catch design issues early on:
+
+1. Open an issue on the [issue tracker](https://issues.apache.org/jira/browse/STORM) if one doesn't exist already
+2. Comment on the issue with your plan for implementing the issue. Explain what pieces of the codebase you're going to touch and how everything is going to fit together.
+3. Storm committers will iterate with you on the design to make sure you're on the right track
+4. Implement your issue, submit a pull request, and iterate from there.
+
+### Modules built on top of Storm
+
+Modules built on top of Storm (like spouts, bolts, etc) that aren't appropriate for Storm core can be done as your own project or as part of [@stormprocessor](https://github.com/stormprocessor). To be part of @stormprocessor, put your project on your own GitHub and then send an email to the mailing list proposing to make it part of @stormprocessor. The community can then discuss whether it's useful enough to be part of @stormprocessor; if so, you'll be added to the @stormprocessor organization and can maintain your project there. The advantage of hosting your module in @stormprocessor is that it will be easier for potential users to find your project.
+
+### Contributing documentation
+
+Documentation contributions are very welcome! The best way to send contributions is as emails through the mailing list.
+
diff --git a/docs/Creating-a-new-Storm-project.md b/docs/Creating-a-new-Storm-project.md
new file mode 100644
index 00000000000..35ab1eba1eb
--- /dev/null
+++ b/docs/Creating-a-new-Storm-project.md
@@ -0,0 +1,25 @@
+---
+title: Creating a New Storm Project
+layout: documentation
+documentation: true
+---
+This page outlines how to set up a Storm project for development. The steps are:
+
+1. Add Storm jars to classpath
+2. If using multilang, add multilang dir to classpath
+
+Follow along to see how to set up the [storm-starter]({{page.git-blob-base}}/examples/storm-starter) project in Eclipse.
+
+### Add Storm jars to classpath
+
+You'll need the Storm jars on your classpath to develop Storm topologies. Using [Maven](Maven.html) is highly recommended. [Here's an example]({{page.git-blob-base}}/examples/storm-starter/pom.xml) of how to set up your pom.xml for a Storm project. If you don't want to use Maven, you can include the jars from the Storm release on your classpath.
+
+To set up the classpath in Eclipse, create a new Java project, include `src/jvm/` as a source path, and make sure all the jars in `lib/` and `lib/dev/` are in the `Referenced Libraries` section of the project.
+
+### If using multilang, add multilang dir to classpath
+
+If you implement spouts or bolts in languages other than Java, then those implementations should be under the `multilang/resources/` directory of the project. For Storm to find these files in local mode, the `resources/` dir needs to be on the classpath. You can do this in Eclipse by adding `multilang/` as a source folder. You may also need to add `multilang/resources` as a source directory.
+
+For more information on writing topologies in other languages, see [Using non-JVM languages with Storm](Using-non-JVM-languages-with-Storm.html).
+
+To test that everything is working in Eclipse, you should now be able to `Run` the `WordCountTopology.java` file. You will see messages being emitted at the console for 10 seconds.
diff --git a/docs/DSLs-and-multilang-adapters.md b/docs/DSLs-and-multilang-adapters.md
new file mode 100644
index 00000000000..917b41905a2
--- /dev/null
+++ b/docs/DSLs-and-multilang-adapters.md
@@ -0,0 +1,11 @@
+---
+title: Storm DSLs and Multi-Lang Adapters
+layout: documentation
+documentation: true
+---
+* [Clojure DSL](Clojure-DSL.html)
+* [Scala DSL](https://github.com/velvia/ScalaStorm)
+* [JRuby DSL](https://github.com/colinsurprenant/redstorm)
+* [Storm/Esper integration](https://github.com/tomdz/storm-esper): Streaming SQL on top of Storm
+* [io-storm](https://github.com/dan-blanchard/io-storm): Perl multilang adapter
+* [FsShelter](https://github.com/Prolucid/FsShelter): F# DSL and runtime with protobuf multilang
diff --git a/docs/Daemon-Fault-Tolerance.md b/docs/Daemon-Fault-Tolerance.md
new file mode 100644
index 00000000000..8dce601a8b4
--- /dev/null
+++ b/docs/Daemon-Fault-Tolerance.md
@@ -0,0 +1,30 @@
+---
+title: Daemon Fault Tolerance
+layout: documentation
+documentation: true
+---
+Storm has several different daemon processes: Nimbus, which schedules workers; supervisors, which launch and kill workers; the log viewer, which gives access to logs; and the UI, which shows the status of a cluster.
+
+## What happens when a worker dies?
+
+When a worker dies, the supervisor will restart it.
+If it continuously fails on startup and is unable to heartbeat to Nimbus, Nimbus will reschedule the worker.
+
+## What happens when a node dies?
+
+The tasks assigned to that machine will time out and Nimbus will reassign those tasks to other machines.
+
+## What happens when Nimbus or Supervisor daemons die?
+
+The Nimbus and Supervisor daemons are designed to be fail-fast (the process self-destructs whenever any unexpected situation is encountered) and stateless (all state is kept in Zookeeper or on disk). As described in [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html), the Nimbus and Supervisor daemons must be run under supervision using a tool like daemontools or monit. So if the Nimbus or Supervisor daemons die, they restart like nothing happened.
+
+Most notably, no worker processes are affected by the death of Nimbus or the Supervisors. This is in contrast to Hadoop, where if the JobTracker dies, all the running jobs are lost.
+
+## Is Nimbus a single point of failure?
+
+If you lose the Nimbus node, the workers will still continue to function. Additionally, supervisors will continue to restart workers if they die. However, without Nimbus, workers won't be reassigned to other machines when necessary (like if you lose a worker machine).
+
+Nimbus has been highly available since Storm 1.0.0. For more information, please refer to the [Nimbus HA Design](nimbus-ha-design.html) document.
+
+## How does Storm guarantee data processing?
+
+Storm provides mechanisms to guarantee data processing even if nodes die or messages are lost. See [Guaranteeing message processing](Guaranteeing-message-processing.html) for the details.
diff --git a/docs/Defining-a-non-jvm-language-dsl-for-storm.md b/docs/Defining-a-non-jvm-language-dsl-for-storm.md
new file mode 100644
index 00000000000..aad5525085e
--- /dev/null
+++ b/docs/Defining-a-non-jvm-language-dsl-for-storm.md
@@ -0,0 +1,45 @@
+---
+title: Defining a Non-JVM DSL for Storm
+layout: documentation
+documentation: true
+---
+The right place to start to learn how to make a non-JVM DSL for Storm is [storm-client/src/storm.thrift]({{page.git-blob-base}}/storm-client/src/storm.thrift). Since Storm topologies are just Thrift structures, and Nimbus is a Thrift daemon, you can create and submit topologies in any language.
+
+When you create the Thrift structs for spouts and bolts, the code for the spout or bolt is specified in the ComponentObject struct:
+
+```
+union ComponentObject {
+  1: binary serialized_java;
+  2: ShellComponent shell;
+  3: JavaObject java_object;
+}
+```
+
+For a Python DSL, you would want to make use of "2" and "3". ShellComponent lets you specify a script to run that component (e.g., your Python code). And JavaObject lets you specify native Java spouts and bolts for the component (and Storm will use reflection to create that spout or bolt).
+
+There's a "storm shell" command that will help with submitting a topology. Its usage is like this:
+
+```
+storm shell resources/ python3 topology.py arg1 arg2
+```
+
+storm shell will then package resources/ into a jar, upload the jar to Nimbus, and call your topology.py script like this:
+
+```
+python3 topology.py arg1 arg2 {nimbus-host} {nimbus-port} {uploaded-jar-location}
+```
+
+Then you can connect to Nimbus using the Thrift API and submit the topology, passing {uploaded-jar-location} into the submitTopology method.
+For reference, here's the submitTopology definition:
+
+```
+void submitTopology(
+    1: string name,
+    2: string uploadedJarLocation,
+    3: string jsonConf,
+    4: StormTopology topology)
+  throws (
+    1: AlreadyAliveException e,
+    2: InvalidTopologyException ite);
+```
+
+Finally, one of the key things to do in a non-JVM DSL is make it easy to define the entire topology in one file (the bolts, spouts, and the definition of the topology).
diff --git a/docs/Distributed-RPC.md b/docs/Distributed-RPC.md
new file mode 100644
index 00000000000..9931e692ceb
--- /dev/null
+++ b/docs/Distributed-RPC.md
@@ -0,0 +1,216 @@
+---
+title: Distributed RPC
+layout: documentation
+documentation: true
+---
+The idea behind distributed RPC (DRPC) is to parallelize the computation of really intense functions on the fly using Storm. The Storm topology takes in as input a stream of function arguments, and it emits an output stream of the results for each of those function calls.
+
+DRPC is not so much a feature of Storm as it is a pattern expressed from Storm's primitives of streams, spouts, bolts, and topologies. DRPC could have been packaged as a separate library from Storm, but it's so useful that it's bundled with Storm.
+
+### High level overview
+
+Distributed RPC is coordinated by a "DRPC server" (Storm comes packaged with an implementation of this). The DRPC server coordinates receiving an RPC request, sending the request to the Storm topology, receiving the results from the Storm topology, and sending the results back to the waiting client. From a client's perspective, a distributed RPC call looks just like a regular RPC call. For example, here's how a client would compute the results for the "reach" function with the argument "http://twitter.com/":
+
+```java
+Config conf = new Config();
+conf.put("storm.thrift.transport", "org.apache.storm.security.auth.plain.PlainSaslTransportPlugin");
+conf.put(Config.STORM_NIMBUS_RETRY_TIMES, 3);
+conf.put(Config.STORM_NIMBUS_RETRY_INTERVAL, 10);
+conf.put(Config.STORM_NIMBUS_RETRY_INTERVAL_CEILING, 20);
+DRPCClient client = new DRPCClient(conf, "drpc-host", 3772);
+String result = client.execute("reach", "http://twitter.com/");
+```
+
+Or, if you just want to use a preconfigured client, you can do the following. The exact host will be selected randomly from the configured set of hosts; if a host appears to be down, the client will loop through all configured hosts looking for one that works.
+
+```java
+DRPCClient client = DRPCClient.getConfiguredClient(conf);
+String result = client.execute("reach", "http://twitter.com/");
+```
+
+The distributed RPC workflow looks like this:
+
+![Tasks in a topology](images/drpc-workflow.png)
+
+A client sends the DRPC server the name of the function to execute and the arguments to that function. The topology implementing that function uses a `DRPCSpout` to receive a function invocation stream from the DRPC server. Each function invocation is tagged with a unique id by the DRPC server. The topology then computes the result, and at the end of the topology a bolt called `ReturnResults` connects to the DRPC server and gives it the result for the function invocation id. The DRPC server then uses the id to match that result up with the client that is waiting, unblocks the waiting client, and sends it the result.
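+
+As a hedged sketch of that flow, wiring such a topology by hand (before using the builder described next, which automates it) might look like the following; `MyFunctionBolt` is a stand-in for your own logic and must pass the return info through:
+
+```java
+import org.apache.storm.drpc.DRPCSpout;
+import org.apache.storm.drpc.ReturnResults;
+import org.apache.storm.topology.TopologyBuilder;
+
+TopologyBuilder builder = new TopologyBuilder();
+// DRPCSpout emits [args, return-info] for each invocation of the "exclaim" function.
+builder.setSpout("drpc", new DRPCSpout("exclaim"));
+// Your bolt must emit [result, return-info] so the result can be routed back.
+builder.setBolt("exclaim", new MyFunctionBolt(), 3).shuffleGrouping("drpc");
+// ReturnResults connects to the DRPC server and hands the result back to it.
+builder.setBolt("return", new ReturnResults(), 3).shuffleGrouping("exclaim");
+```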
+
+### LinearDRPCTopologyBuilder
+
+Storm comes with a topology builder called [LinearDRPCTopologyBuilder](javadocs/org/apache/storm/drpc/LinearDRPCTopologyBuilder.html) that automates almost all the steps involved in doing DRPC. These include:
+
+1. Setting up the spout
+2. Returning the results to the DRPC server
+3. Providing functionality to bolts for doing finite aggregations over groups of tuples
+
+Let's look at a simple example. Here's the implementation of a DRPC topology that returns its input argument with a "!" appended:
+
+```java
+public static class ExclaimBolt extends BaseBasicBolt {
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+        String input = tuple.getString(1);
+        collector.emit(new Values(tuple.getValue(0), input + "!"));
+    }
+
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("id", "result"));
+    }
+}
+
+public static void main(String[] args) throws Exception {
+    LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("exclamation");
+    builder.addBolt(new ExclaimBolt(), 3);
+    // ...
+}
+```
+
+As you can see, there's very little to it. When creating the `LinearDRPCTopologyBuilder`, you tell it the name of the DRPC function for the topology. A single DRPC server can coordinate many functions, and the function name distinguishes the functions from one another. The first bolt you declare will take in as input 2-tuples, where the first field is the request id and the second field is the arguments for that request. `LinearDRPCTopologyBuilder` expects the last bolt to emit an output stream containing 2-tuples of the form [id, result]. Finally, all intermediate tuples must contain the request id as the first field.
+
+In this example, `ExclaimBolt` simply appends a "!" to the second field of the tuple. `LinearDRPCTopologyBuilder` handles the rest of the coordination of connecting to the DRPC server and sending results back.
+
+### Local mode DRPC
+
+In the past, using DRPC in local mode required creating a special LocalDRPC instance. This can still be used when writing tests for your code, but in the current version of Storm, when you run in local mode a LocalDRPC
+instance is also created, and any DRPCClient created will link to it instead of the outside world. This means that any interaction you want to test needs to be a part of the script that launches the topology, just like
+with LocalDRPC.
+
+### Remote mode DRPC
+
+Using DRPC on an actual cluster is also straightforward. There are three steps:
+
+1. Launch DRPC server(s)
+2. Configure the locations of the DRPC servers
+3. Submit DRPC topologies to the Storm cluster
+
+Launching a DRPC server can be done with the `storm` script and is just like launching Nimbus or the UI:
+
+```
+bin/storm drpc
+```
+
+Next, you need to configure your Storm cluster to know the locations of the DRPC server(s). This is how `DRPCSpout` knows from where to read function invocations. This can be done through the `storm.yaml` file or the topology configurations. You should also specify the `storm.thrift.transport` property to match the DRPCClient settings. Configuring this through the `storm.yaml` looks something like this:
+
+```yaml
+drpc.servers:
+  - "drpc1.foo.com"
+  - "drpc2.foo.com"
+drpc.http.port: 8081
+storm.thrift.transport: "org.apache.storm.security.auth.plain.PlainSaslTransportPlugin"
+```
+
+Finally, you launch DRPC topologies using `StormSubmitter` just like you launch any other topology.
+To run the above example in remote mode, you do something like this:
+
+```java
+StormSubmitter.submitTopology("exclamation-drpc", conf, builder.createRemoteTopology());
+```
+
+`createRemoteTopology` is used to create topologies suitable for Storm clusters.
+
+Assuming that the topology is listening on the `exclaim` function, you can execute it in several different ways.
+
+Programmatically:
+```java
+Config conf = new Config();
+try (DRPCClient drpc = DRPCClient.getConfiguredClient(conf)) {
+    //Use the DRPC client
+    String result = drpc.execute("exclaim", "argument");
+}
+```
+
+Through curl:
+```curl http://hostname:8081/drpc/exclaim/argument```
+
+Through the command line:
+```bin/storm drpc-client exclaim argument```
+
+### A more complex example
+
+The exclamation DRPC example was a toy example for illustrating the concepts of DRPC. Let's look at a more complex example which really needs the parallelism a Storm cluster provides for computing the DRPC function. The example we'll look at is computing the reach of a URL on Twitter.
+
+The reach of a URL is the number of unique people exposed to a URL on Twitter. To compute reach, you need to:
+
+1. Get all the people who tweeted the URL
+2. Get all the followers of all those people
+3. Unique the set of followers
+4. Count the unique set of followers
+
+A single reach computation can involve thousands of database calls and tens of millions of follower records during the computation. It's a really, really intense computation. As you're about to see, implementing this function on top of Storm is dead simple. On a single machine, reach can take minutes to compute; on a Storm cluster, you can compute reach for even the hardest URLs in a couple seconds.
+
+A sample reach topology is defined in storm-starter [here]({{page.git-blob-base}}/examples/storm-starter/src/jvm/org/apache/storm/starter/ReachTopology.java). Here's how you define the reach topology:
+
+```java
+LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("reach");
+builder.addBolt(new GetTweeters(), 3);
+builder.addBolt(new GetFollowers(), 12)
+        .shuffleGrouping();
+builder.addBolt(new PartialUniquer(), 6)
+        .fieldsGrouping(new Fields("id", "follower"));
+builder.addBolt(new CountAggregator(), 2)
+        .fieldsGrouping(new Fields("id"));
+```
+
+The topology executes as four steps:
+
+1. `GetTweeters` gets the users who tweeted the URL. It transforms an input stream of `[id, url]` into an output stream of `[id, tweeter]`. Each `url` tuple will map to many `tweeter` tuples.
+2. `GetFollowers` gets the followers for the tweeters. It transforms an input stream of `[id, tweeter]` into an output stream of `[id, follower]`. Across all the tasks, there may of course be duplication of follower tuples when someone follows multiple people who tweeted the same URL.
+3. `PartialUniquer` groups the followers stream by the follower id. This has the effect of the same follower going to the same task. So each task of `PartialUniquer` will receive mutually independent sets of followers. Once `PartialUniquer` receives all the follower tuples directed at it for the request id, it emits the unique count of its subset of followers.
+4. Finally, `CountAggregator` receives the partial counts from each of the `PartialUniquer` tasks and sums them up to complete the reach computation.
+
+Let's take a look at the `PartialUniquer` bolt:
+
+```java
+public class PartialUniquer extends BaseBatchBolt {
+    BatchOutputCollector _collector;
+    Object _id;
+    Set<String> _followers = new HashSet<>();
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
+        _collector = collector;
+        _id = id;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+        _followers.add(tuple.getString(1));
+    }
+
+    @Override
+    public void finishBatch() {
+        _collector.emit(new Values(_id, _followers.size()));
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("id", "partial-count"));
+    }
+}
+```
+
+`PartialUniquer` implements `IBatchBolt` by extending `BaseBatchBolt`. A batch bolt provides a first-class API for processing a batch of tuples as a concrete unit. A new instance of the batch bolt is created for each request id, and Storm takes care of cleaning up the instances when appropriate.
+
+When `PartialUniquer` receives a follower tuple in the `execute` method, it adds it to the set for the request id in an internal `HashSet`.
+
+Batch bolts provide the `finishBatch` method, which is called after all the tuples for this batch targeted at this task have been processed. In the callback, `PartialUniquer` emits a single tuple containing the unique count for its subset of follower ids.
+
+Under the hood, `CoordinatedBolt` is used to detect when a given bolt has received all of the tuples for any given request id. `CoordinatedBolt` makes use of direct streams to manage this coordination.
+
+The rest of the topology should be self-explanatory. As you can see, every single step of the reach computation is done in parallel, and defining the DRPC topology was extremely simple.
+
+### Non-linear DRPC topologies
+
+`LinearDRPCTopologyBuilder` only handles "linear" DRPC topologies, where the computation is expressed as a sequence of steps (like reach). It's not hard to imagine functions that would require a more complicated topology with branching and merging of the bolts. For now, to do this you'll need to drop down into using `CoordinatedBolt` directly. Be sure to talk about your use case for non-linear DRPC topologies on the mailing list to inform the construction of more general abstractions for DRPC topologies.
+
+### How LinearDRPCTopologyBuilder works
+
+* DRPCSpout emits [args, return-info]. return-info is the host and port of the DRPC server as well as the id generated by the DRPC server
+* constructs a topology consisting of:
+  * DRPCSpout
+  * PrepareRequest (generates a request id and creates a stream for the return info and a stream for the args)
+  * CoordinatedBolt wrappers and direct groupings
+  * JoinResult (joins the result with the return info)
+  * ReturnResult (connects to the DRPC server and returns the result)
+* LinearDRPCTopologyBuilder is a good example of a higher level abstraction built on top of Storm's primitives
+
+### Advanced
+* KeyedFairBolt for weaving the processing of multiple requests at the same time
+* How to use `CoordinatedBolt` directly
diff --git a/docs/Docker-support.md b/docs/Docker-support.md
new file mode 100644
index 00000000000..8a2695e4a9c
--- /dev/null
+++ b/docs/Docker-support.md
@@ -0,0 +1,135 @@
+---
+title: Docker Support
+layout: documentation
+documentation: true
+---
+
+# Docker Support
+
+This page describes how the Storm supervisor launches workers in Docker containers.
+
+Note: This has only been tested on RHEL7.
+
+## Motivation
+
+This feature is mostly about security and portability. With workers running inside Docker containers, we isolate running user code from each other and from the host machine so that the whole system is less vulnerable to attack.
+It also allows users to run their topologies on different OS versions using different Docker images.
+
+## Implementation
+
+Essentially, `DockerManager` composes a docker-run command and uses the `worker-launcher` executable to execute the command
+to launch a container. The `storm-worker-script.sh` script is the actual command to launch the worker process and logviewer in the container.
+One container ID is mapped to one worker ID conceptually. When the worker process dies, the container exits.
+
+For security, when the supervisor launches the docker container, it makes the whole container read-only except for some explicit bind-mount locations.
+It also drops all kernel capabilities and prevents container processes from gaining new privileges.
+
+Because container privileges such as PTRACE are dropped, `jmap`, `strace` and some other debugging tools cannot be used directly in the container when it is entered with the docker-exec command.
+We need to install `nscd` and have it running in the system. Storm will bind-mount the nscd directory when it launches the container.
+And `nsenter` will be used to enter the docker container without losing privileges. This functionality is also implemented in the `worker-launcher` executable.
+
+The command that will be run by the `worker-launcher` executable to launch a container will be something like the following (the `<uid>`, `<gid>`, `<group-id>` and `<storm-local-dir>` placeholders are filled in at launch time):
+
+```bash
+run --name=8198e1f0-f323-4b9d-8625-e4fd640cd058 \
+--user=<uid>:<gid> \
+-d \
+--net=host \
+--read-only \
+-v /sys/fs/cgroup:/sys/fs/cgroup:ro \
+-v /usr/share/apache-storm-2.3.0:/usr/share/apache-storm-2.3.0:ro \
+-v /<storm-local-dir>/supervisor:/<storm-local-dir>/supervisor:ro \
+-v /<storm-local-dir>/workers/8198e1f0-f323-4b9d-8625-e4fd640cd058:/<storm-local-dir>/workers/8198e1f0-f323-4b9d-8625-e4fd640cd058 \
+-v /<storm-local-dir>/workers-artifacts/word-count-1-1591895933/6703:/<storm-local-dir>/workers-artifacts/word-count-1-1591895933/6703 \
+-v /<storm-local-dir>/workers-users/8198e1f0-f323-4b9d-8625-e4fd640cd058:/<storm-local-dir>/workers-users/8198e1f0-f323-4b9d-8625-e4fd640cd058 \
+-v /var/run/nscd:/var/run/nscd \
+-v /<storm-local-dir>/supervisor/stormdist/word-count-1-1591895933/shared_by_topology:/<storm-local-dir>/supervisor/stormdist/word-count-1-1591895933/shared_by_topology \
+-v /<storm-local-dir>/workers/8198e1f0-f323-4b9d-8625-e4fd640cd058/tmp:/tmp \
+-v /etc/storm:/etc/storm:ro \
+--cgroup-parent=/storm \
+--group-add <group-id> \
+--workdir=/<storm-local-dir>/workers/8198e1f0-f323-4b9d-8625-e4fd640cd058 \
+--cidfile=/<storm-local-dir>/workers/8198e1f0-f323-4b9d-8625-e4fd640cd058/container.cid \
+--cap-drop=ALL \
+--security-opt no-new-privileges \
+--security-opt seccomp=/usr/share/apache-storm-2.3.0/conf/seccomp.json \
+--cpus=2.6 xxx.xxx.com:8080/storm/storm/rhel7:latest \
+bash /<storm-local-dir>/workers/8198e1f0-f323-4b9d-8625-e4fd640cd058/storm-worker-script.sh
+```
+
+
+## Setup
+
+To make the supervisor work with Docker, you need to configure the related settings correctly following the instructions below.
+
+### Settings Related To Docker Support in Storm
+
+| Setting | Description |
+|-------------------------------------------|-----------------------|
+| `storm.resource.isolation.plugin.enable` | Set to `true` to enable the isolation plugin. `storm.resource.isolation.plugin` determines which plugin to use. If this is set to `false`, `org.apache.storm.container.DefaultResourceIsolationManager` will be used. |
+| `storm.resource.isolation.plugin` | Set to `"org.apache.storm.container.docker.DockerManager"` to enable docker support. |
+| `storm.oci.allowed.images` | An allowlist of docker images that can be used. Users can only choose a docker image from the list. |
+| `storm.oci.image` | The default docker image to be used if the user doesn't specify which image to use. It must belong to `storm.oci.allowed.images`. |
+| `topology.oci.image` | Topologies can specify which image to use. It must belong to `storm.oci.allowed.images`. |
+| `storm.oci.cgroup.root` | The root path of the cgroup for docker to use. On RHEL7, it should be "/sys/fs/cgroup". |
+| `storm.oci.cgroup.parent` | The `--cgroup-parent` config for the docker command. It must follow the constraints of docker commands. The path will be made absolute if it's a relative path, because we saw some weird bugs (the cgroup memory directory disappears after a while) when a relative path is used. |
+| `storm.oci.readonly.bindmounts` | A list of read-only bind-mounted directories. |
+| `storm.oci.readwrite.bindmounts` | A list of read-write bind-mounted directories. |
+| `storm.oci.nscd.dir` | The directory of nscd (name service cache daemon), e.g. "/var/run/nscd/". nscd must be running so that profiling can work properly. |
+| `storm.oci.seccomp.profile` | Specify the seccomp JSON file to be used as a seccomp filter. |
+| `supervisor.worker.launcher` | Full path to the worker-launcher executable. Details explained at [How to set up worker-launcher](#how-to-set-up-worker-launcher) |
+
+Note that we only support the cgroupfs cgroup driver because of some issues with the `systemd` cgroup driver; restricting to `cgroupfs` also makes cgroup paths simpler. Please make sure to use `cgroupfs` before setting up docker support.
+
+#### Example
+
+Below is a simple configuration example for Storm on RHEL7. In this example, Storm is deployed at `/usr/share/apache-storm-2.3.0`.
+
+```yaml
+storm.resource.isolation.plugin.enable: true
+storm.resource.isolation.plugin: "org.apache.storm.container.docker.DockerManager"
+storm.oci.allowed.images: ["xxx.xxx.com:8080/storm/storm/rhel7:latest"]
+storm.oci.image: "xxx.xxx.com:8080/storm/storm/rhel7:latest"
+storm.oci.cgroup.root: "/sys/fs/cgroup"
+storm.oci.cgroup.parent: "/storm"
+storm.oci.readonly.bindmounts:
+  - "/etc/storm"
+storm.oci.nscd.dir: "/var/run/nscd"
+supervisor.worker.launcher: "/usr/share/apache-storm-2.3.0/bin/worker-launcher"
+```
+
+### How to set up worker-launcher
+
+The `worker-launcher` executable is a special program that is used to launch docker containers and run `docker` and `nsenter` commands.
+For this to work, `worker-launcher` needs to be owned by root, but with the group set to a group that only the supervisor headless user is a part of.
+`worker-launcher` also needs to have `6550` octal permissions. There is also a `worker-launcher.cfg` file, usually located under `/etc/storm`, that should look something like the following:
+```
+storm.worker-launcher.group=$(worker_launcher_group)
+min.user.id=$(min_user_id)
+worker.profiler.script.path=$(profiler_script_path)
+```
+where `storm.worker-launcher.group` is the same group the supervisor user is a part of, and `min.user.id` is set to the first real user id on the system. This config file also needs to be owned by root and must not have group or world write permissions.
+`worker.profiler.script.path` points to the profiler script. For security, the script should be writable only by root. Note that it is the only profiler script that will be used, and `DaemonConfig.WORKER_PROFILER_COMMAND` will be ignored.
+
+There are two optional configs that will be used by docker support: `docker.binary` and `nsenter.binary`. By default, they are set to
+```
+docker.binary=/usr/bin/docker
+nsenter.binary=/usr/bin/nsenter
+```
+and you don't need to set them in the worker-launcher.cfg unless you need to change them.
+
+## Profile the processes inside the container
+You can profile your worker processes by clicking on the profiling buttons (jstack, heap, etc) in the Storm UI.
+If you have sudo permission, you can also run `sudo nsenter --target <container-pid> --pid --mount --setuid <uid> --setgid <gid>` to enter the container.
+Then you can run `jstack`, `jmap`, etc. inside the container. `<container-pid>` is the pid of the container process on the host.
+`<container-pid>` can be obtained by running the `sudo docker inspect --format '{{.State.Pid}}' <container-id>` command.
+`<uid>` and `<gid>` are the user id and group id of the container owner, respectively.
+
+## Seccomp security profiles
+
+You can set `storm.oci.seccomp.profile` to restrict the actions available within the container. If it's not set, the [default docker seccomp profile](https://github.com/moby/moby/blob/master/profiles/seccomp/default.json)
+is used. You can use the provided `conf/seccomp.json.example` or specify your own `seccomp.json` file.
+
+## CGroup Metrics
+
+Docker internally uses cgroups to control resources for containers. The CGroup Metrics described at [cgroups_in_storm.md](cgroups_in_storm.md#CGroup-Metrics) still apply, except for CGroupCpuGuarantee. To get the CGroup cpu guarantee, use CGroupCpuGuaranteeByCfsQuota instead.
\ No newline at end of file
diff --git a/docs/Eventlogging.md b/docs/Eventlogging.md
new file mode 100644
index 00000000000..6fbf1ed0d98
--- /dev/null
+++ b/docs/Eventlogging.md
@@ -0,0 +1,125 @@
+---
+title: Topology event inspector
+layout: documentation
+documentation: true
+---
+
+# Introduction
+
+The topology event inspector provides the ability to view tuples as they flow through the different stages of a Storm topology.
+This can be useful for inspecting the tuples emitted at a spout or a bolt in the topology pipeline while the topology is running, without stopping or redeploying the topology. The normal flow of tuples from the spouts to the bolts is not affected by turning on event logging.
+
+## Enabling event logging
+
+Note: Event logging needs to be enabled first by setting the storm config "topology.eventlogger.executors" to a non-zero value. Please see
+the [Configuration](#config) section for more details.
+
+Events can be logged by clicking the "Debug" button under the topology actions in the topology view.
This logs the +tuples from all the spouts and bolts in a topology at the specified sampling percentage. + +
+*Figure 1: Enable event logging at topology level.*
+ +You could also enable event logging at a specific spout or bolt level by going to the corresponding component page and +clicking "Debug" under component actions. + +
+*Figure 2: Enable event logging at component level.*
+
+## Viewing the event logs
+The Storm "logviewer" must be running in order to view the logged tuples. If it is not already running, the logviewer can be started by running the "bin/storm logviewer" command from the storm installation directory. To view the tuples, go to the specific spout or bolt component page in the Storm UI and click the "events" link under the component summary (as highlighted in Figure 2 above).
+
+This opens a view like the one below, where you can navigate between different pages and view the logged tuples.
+
+*Figure 3: Viewing the logged events.*
+
+Each line in the event log contains an entry corresponding to a tuple emitted from a specific spout/bolt, in a comma-separated format:
+
+`Timestamp, Component name, Component task-id, MessageId (in case of anchoring), List of emitted values`
+
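+For illustration, an entry logged for a sentence-splitting bolt might look like the following; the values here are made up, not captured from a real run:
+
+```
+05:10:45.971, split, 8, 9322944189000933402, [jumped]
+```
+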
+## Disabling the event logs
+
+Event logging can be disabled for a specific component or for the whole topology by clicking "Stop Debug" under the component or topology actions in the Storm UI.
+
+*Figure 4: Disable event logging at topology level.*
+
+## Configuration
+Eventlogging works by sending the events (tuples) from each component to an internal eventlogger bolt. By default Storm does not start any event logger tasks, but this can easily be changed by setting the parameter below while running your topology (by setting it in storm.yaml or passing options via the command line).
+
+| Parameter | Meaning |
+|-----------|---------|
+| "topology.eventlogger.executors": 0 | No event logger tasks are created (default). |
+| "topology.eventlogger.executors": 1 | One event logger task for the topology. |
+| "topology.eventlogger.executors": nil | One event logger task per worker. |
+
+
+## Extending eventlogging
+
+Storm provides an `IEventLogger` interface, which is used by the event logger bolt to log the events.
+
+```java
+/**
+ * EventLogger interface for logging the event info to a sink like log file or db
+ * for inspecting the events via UI for debugging.
+ */
+public interface IEventLogger {
+    /**
+     * Invoked during eventlogger bolt prepare.
+     */
+    void prepare(Map stormConf, Map arguments, TopologyContext context);
+
+    /**
+     * Invoked when the {@link EventLoggerBolt} receives a tuple from the spouts or bolts that have event logging enabled.
+     *
+     * @param e the event
+     */
+    void log(EventInfo e);
+
+    /**
+     * Invoked when the event logger bolt is cleaned up.
+     */
+    void close();
+}
+```
+
+The default implementation is FileBasedEventLogger, which logs the events to an events.log file (`logs/workers-artifacts/<topology-id>/<worker-port>/events.log`).
+Alternate implementations of the `IEventLogger` interface can be added to extend the event logging functionality (say, to build a search index or to log the events in a database).
+
+If you just want to use FileBasedEventLogger but with a different log format, you can implement your own logger by extending FileBasedEventLogger and overriding `buildLogMessage(EventInfo)` to provide the log line explicitly.
+
+To register an event logger with your topology, add it to your topology's configuration like:
+
+```java
+conf.registerEventLogger(org.apache.storm.metric.FileBasedEventLogger.class);
+```
+
+See [Config#registerEventLogger](javadocs/org/apache/storm/Config.html#registerEventLogger-java.lang.Class-) and its overloaded methods in the javadoc.
+
+Otherwise edit the storm.yaml config file:
+
+```yaml
+topology.event.logger.register:
+  - class: "org.apache.storm.metric.FileBasedEventLogger"
+  - class: "org.mycompany.MyEventLogger"
+    arguments:
+      endpoint: "event-logger.mycompany.org"
+```
+
+When you implement your own event logger, the `arguments` map is passed to [IEventLogger#prepare](javadocs/org/apache/storm/metric/IEventLogger.html#prepare-java.util.Map-java.lang.Map-org.apache.storm.task.TopologyContext-) when it is called.
+
+Keep in mind that the EventLoggerBolt is just another bolt: the overall throughput of the topology will go down when the registered event loggers cannot keep up with the incoming events, so take the same care with it as with a normal bolt. One way to avoid this problem is to make your `IEventLogger` implementation non-blocking, as sketched below.
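+For example, here is a rough sketch of such a non-blocking logger. It hands events to a background thread through a bounded queue and drops events when the queue is full; the class name, queue size and drop policy are our own choices, and it assumes `FileBasedEventLogger` can be extended with the `prepare` signature shown above:
+
+```java
+import java.util.Map;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.storm.metric.FileBasedEventLogger;
+import org.apache.storm.metric.IEventLogger.EventInfo;
+import org.apache.storm.task.TopologyContext;
+
+public class AsyncFileEventLogger extends FileBasedEventLogger {
+    // Bounded queue: if the disk cannot keep up, we drop events rather than stall the topology.
+    private final BlockingQueue<EventInfo> queue = new LinkedBlockingQueue<>(10000);
+
+    @Override
+    public void prepare(Map stormConf, Map arguments, TopologyContext context) {
+        super.prepare(stormConf, arguments, context);
+        Thread writer = new Thread(() -> {
+            try {
+                while (true) {
+                    super.log(queue.take()); // actual file writes happen on this thread
+                }
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+            }
+        });
+        writer.setDaemon(true);
+        writer.start();
+    }
+
+    @Override
+    public void log(EventInfo e) {
+        queue.offer(e); // never blocks; the event is silently dropped if the queue is full
+    }
+}
+```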
diff --git a/docs/FAQ.md b/docs/FAQ.md
new file mode 100644
index 00000000000..f59d45fad14
--- /dev/null
+++ b/docs/FAQ.md
@@ -0,0 +1,127 @@
+---
+title: FAQ
+layout: documentation
+documentation: true
+---
+
+## Best Practices
+
+### What rules of thumb can you give me for configuring Storm+Trident?
+
+* Make the number of workers a multiple of the number of machines; parallelism a multiple of the number of workers; and the number of kafka partitions a multiple of the spout parallelism
+* Use one worker per topology per machine
+* Start with fewer, larger aggregators, one per machine with workers on it
+* Use the isolation scheduler
+* Use one acker per worker -- 0.9 makes that the default, but earlier versions do not.
+* Enable GC logging; you should see very few major GCs if things are in reasonable shape.
+* Set the trident batch millis to about 50% of your typical end-to-end latency.
+* Start with a max spout pending that is for sure too small -- one for trident, or the number of executors for storm -- and increase it until you stop seeing changes in the flow. You'll probably end up with something near `2*(throughput in recs/sec)*(end-to-end latency)` (2x the Little's law capacity).
+
+### What are some of the best ways to get a worker to mysteriously and bafflingly die?
+
+* Do you have write access to the log directory?
+* Are you blowing out your heap?
+* Are all the right libraries installed on all of the workers?
+* Is the zookeeper hostname still set to localhost?
+* Did you supply a correct, unique hostname -- one that resolves back to the machine -- to each worker, and put it in the storm conf file?
+* Have you opened firewall/security-group permissions _bidirectionally_ among a) all the workers, b) the storm master, c) zookeeper? Also, from the workers to any kafka/kestrel/database/etc that your topology accesses? Use netcat to poke the appropriate ports and be sure.
+
+### Halp! I cannot see:
+
+* **my logs** Logs by default go to $STORM_HOME/logs. Check that you have write permissions to that directory. They are configured in log4j2/{cluster, worker}.xml.
+* **final JVM settings** Add the `-XX:+PrintFlagsFinal` commandline option in the childopts (see the conf file).
+* **final Java system properties** Add `Properties props = System.getProperties(); props.list(System.out);` near where you build your topology.
+
+### How many Workers should I use?
+
+The total number of workers is set by the supervisors -- there's some number of JVM slots each supervisor will superintend. The thing you set on the topology is how many worker slots it will try to claim.
+
+There's no great reason to use more than one worker per topology per machine.
+
+With one topology running on three 8-core nodes, and a parallelism hint of 24, each bolt gets 8 executors per machine, i.e. one for each core. There are three big benefits to running three workers (with 8 assigned executors each) compared to running, say, 24 workers (one assigned executor each).
+
+First, data that is repartitioned (shuffles or group-bys) to executors in the same worker will not have to hit the transfer buffer. Instead, tuples are deposited directly from send to receive buffer. That's a big win. By contrast, if the destination executor were on the same machine in a different worker, it would have to go send -> worker transfer -> local socket -> worker recv -> exec recv buffer. It doesn't hit the network card, but it's not as big a win as when executors are in the same worker.
+
+Second, you're typically better off with three aggregators having very large backing caches than with twenty-four aggregators having small backing caches. This reduces the effect of skew, and improves LRU efficiency.
+
+Lastly, fewer workers reduces control flow chatter.
+
+## Topology
+
+### Can a Trident topology have Multiple Streams?
+
+> Can a Trident Topology work like a workflow with conditional paths (if-else)? e.g. A Spout (S1) connects to a bolt (B0) which, based on certain values in the incoming tuple, routes them to either bolt (B1) or bolt (B2), but not both.
+
+A Trident "each" operator returns a Stream object, which you can store in a variable. You can then run multiple eaches on the same Stream to split it, e.g.:
+
+    Stream s = topology.each(...).groupBy(...).aggregate(...)
+    Stream branch1 = s.each(..., FilterA)
+    Stream branch2 = s.each(..., FilterB)
+
+You can join streams with join, merge or multiReduce.
+
+At time of writing, you can't emit to multiple output streams from Trident -- see [STORM-68](https://issues.apache.org/jira/browse/STORM-68).
+
+### Why am I getting a NotSerializableException/IllegalStateException when my topology is being started up?
+
+Within the Storm lifecycle, the topology is instantiated and then serialized to byte format to be stored in ZooKeeper, prior to the topology being executed. Within this step, if a spout or bolt within the topology has an initialized unserializable property, serialization will fail. If there is a need for a field that is unserializable, initialize it within the bolt's `prepare` or spout's `open` method, which is run after the topology is delivered to the worker.
+
+## Spouts
+
+### What is a coordinator, and why are there several?
+
+A trident-spout is actually run within a storm _bolt_. The storm-spout of a trident topology is the MasterBatchCoordinator -- it coordinates trident batches and is the same no matter what spouts you use. A batch is born when the MBC dispenses a seed tuple to each of the spout-coordinators. The spout-coordinator bolts know how your particular spouts should cooperate -- so in the kafka case, it's what helps figure out what partition and offset range each spout should pull from.
+
+### What can I store into the spout's metadata record?
+
+You should only store static data, and as little of it as possible, into the metadata record (note: maybe you _can_ store more interesting things; you shouldn't, though).
+
+### How often is the 'emitBatchNew' function called?
+
+Since the MBC is the actual spout, all the tuples in a batch are just members of its tuple tree. That means storm's "max spout pending" config effectively defines the number of concurrent batches trident runs. The MBC emits a new batch if it has fewer than max-spout-pending tuples pending and if at least one [trident batch interval]({{page.git-blob-base}}/conf/defaults.yaml#L115)'s worth of seconds has passed since the last batch.
+
+### If nothing was emitted does Trident slow down the calls?
+
+Yes, there's a pluggable "spout wait strategy"; the default is to sleep for a [configurable amount of time]({{page.git-blob-base}}/conf/defaults.yaml#L110).
+
+### OK, then what is the trident batch interval for?
+
+You know how computers of the 486 era had a [turbo button](http://en.wikipedia.org/wiki/Turbo_button) on them? It's like that.
+
+Actually, it has two practical uses. One is to throttle spouts that poll a remote source without throttling processing. For example, we have a spout that looks in a given S3 bucket for a new batch-uploaded file to read, linebreak and emit. We don't want it hitting S3 more than every few seconds: files don't show up more than once every few minutes, and a batch takes a few seconds to process.
+
+The other is to limit overpressure on the internal queues during startup or under a heavy burst load -- if the spouts spring to life and suddenly jam ten batches' worth of records into the system, you could have a mass of less-urgent tuples from batch 7 clog up the transfer buffer and prevent the $commit tuple from batch 3 from getting through (or even just the regular old tuples from batch 3). What we do is set the trident batch interval to about half the typical end-to-end processing latency -- if it takes 600ms to process a batch, it's OK to only kick off a batch every 300ms.
+
+Note that this is a cap, not an additional delay -- with a period of 300ms, if your batch takes 258ms Trident will only delay an additional 42ms.
+
+### How do you set the batch size?
+
+Trident doesn't place its own limits on the batch count. In the case of the Kafka spout, the max fetch bytes size divided by the average record size defines an effective records per subbatch partition.
+
+### How do I resize a batch?
+
+The trident batch is a somewhat overloaded facility. Together with the number of partitions, the batch size is constrained by or serves to define
+
+1. the unit of transactional safety (tuples at risk vs time)
+2. per partition, an effective windowing mechanism for windowed stream analytics
+3. per partition, the number of simultaneous queries that will be made by a partitionQuery, partitionPersist, etc.
+4. per partition, the number of records convenient for the spout to dispatch at the same time
+
+You can't change the overall batch size once generated, but you can change the number of partitions -- do a shuffle and then change the parallelism hint.
+
+## Time Series
+
+### How do I aggregate events by time?
+
+If you have records with an immutable timestamp, and you would like to count, average or otherwise aggregate them into discrete time buckets, Trident is an excellent and scalable solution.
+
+Write an `Each` function that turns the timestamp into a time bucket (see the sketch below): if the bucket size was "by hour", then the timestamp `2013-08-08 12:34:56` would be mapped to the `2013-08-08 12:00:00` time bucket, and so would everything else in the twelve o'clock hour. Then group on that timebucket and use a grouped persistentAggregate. The persistentAggregate uses a local cacheMap backed by a data store. Groups with many records require very few reads from the data store, and use efficient bulk reads and writes; as long as your data feed is relatively prompt Trident will make very efficient use of memory and network. Even if a server drops off line for a day, then delivers that full day's worth of data in a rush, the old results will be calmly retrieved and updated -- and without interfering with calculating the current results.
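+A minimal sketch of such a bucketing function, assuming the timestamp arrives as epoch milliseconds in the first field (the class name is ours):
+
+```java
+import org.apache.storm.trident.operation.BaseFunction;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.tuple.TridentTuple;
+import org.apache.storm.tuple.Values;
+
+public class HourBucket extends BaseFunction {
+    private static final long HOUR_MS = 60L * 60L * 1000L;
+
+    @Override
+    public void execute(TridentTuple tuple, TridentCollector collector) {
+        long ts = tuple.getLong(0);                            // e.g. 2013-08-08 12:34:56 as epoch millis
+        collector.emit(new Values((ts / HOUR_MS) * HOUR_MS));  // floored to 2013-08-08 12:00:00
+    }
+}
+```
+
+It would be wired in with something like `stream.each(new Fields("timestamp"), new HourBucket(), new Fields("bucket"))` before the groupBy.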
+
+### How can I know that all records for a time bucket have been received?
+
+You cannot know that all events are collected -- this is an epistemological challenge, not a distributed systems challenge. You can:
+
+* Set a time limit using domain knowledge
+* Introduce a _punctuation_: a record known to come after all records in the given time bucket. Trident uses this scheme to know when a batch is complete. If you, for instance, receive records from a set of sensors, each in order for that sensor, then once all sensors have sent you a 3:02:xx or later timestamp, you know you can commit.
+* When possible, make your process incremental: each value that comes in makes the answer more and more true. A Trident ReducerAggregator is an operator that takes a prior result and a set of new records and returns a new result. This lets the result be cached and serialized to a datastore; if a server drops off line for a day and then comes back with a full day's worth of data in a rush, the old results will be calmly retrieved and updated.
+* Lambda architecture: Record all events into an archival store (S3, HBase, HDFS) on receipt. In the fast layer, once the time window is clear, process the bucket to get an actionable answer, and ignore everything older than the time window. Periodically run a global aggregation to calculate a "correct" answer.
diff --git a/docs/Fault-tolerance.md b/docs/Fault-tolerance.md
new file mode 100644
index 00000000000..9a7a349f5b2
--- /dev/null
+++ b/docs/Fault-tolerance.md
@@ -0,0 +1,28 @@
+---
+layout: documentation
+---
+This page explains the design details of Storm that make it a fault-tolerant system.
+
+## What happens when a worker dies?
+
+When a worker dies, the supervisor will restart it. If it continuously fails on startup and is unable to heartbeat to Nimbus, Nimbus will reassign the worker to another machine.
+
+## What happens when a node dies?
+
+The tasks assigned to that machine will time out and Nimbus will reassign those tasks to other machines.
+
+## What happens when Nimbus or Supervisor daemons die?
+
+The Nimbus and Supervisor daemons are designed to be fail-fast (the process self-destructs whenever any unexpected situation is encountered) and stateless (all state is kept in Zookeeper or on disk). As described in [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html), the Nimbus and Supervisor daemons must be run under supervision using a tool like daemontools or monit. So if the Nimbus or Supervisor daemons die, they restart like nothing happened.
+
+Most notably, no worker processes are affected by the death of Nimbus or the Supervisors. This is in contrast to Hadoop, where if the JobTracker dies, all the running jobs are lost.
+
+## Is Nimbus a single point of failure?
+
+If you lose the Nimbus node, the workers will still continue to function. Additionally, supervisors will continue to restart workers if they die. However, without Nimbus, workers won't be reassigned to other machines when necessary (like if you lose a worker machine).
+
+So the answer is that Nimbus is "sort of" a SPOF. In practice, it's not a big deal since nothing catastrophic happens when the Nimbus daemon dies. There are plans to make Nimbus highly available in the future.
+
+## How does Storm guarantee data processing?
+
+Storm provides mechanisms to guarantee data processing even if nodes die or messages are lost. See [Guaranteeing message processing](Guaranteeing-message-processing.html) for the details.
diff --git a/docs/Generic-resources.md b/docs/Generic-resources.md
new file mode 100644
index 00000000000..f3bfe3e98ca
--- /dev/null
+++ b/docs/Generic-resources.md
@@ -0,0 +1,39 @@
+---
+title: Generic Resources
+layout: documentation
+documentation: true
+---
+
+### Generic Resources
+Generic Resources allow Storm to reference arbitrary resource types. Generic Resources may be considered an extension of the resources enumerated by the [Resource Aware Scheduler](Resource_Aware_Scheduler_overview.html), which accounts for CPU and memory.
+
+### API Overview
+For a Storm Topology, the user can now specify the amount of generic resources a topology component (i.e. Spout or Bolt) requires to run a single instance of the component.
+The user can specify the resource requirement for a topology component by using the following API call.
+```
+    public T addResource(String resourceName, Number resourceValue)
+```
+Parameters:
+- resourceName – The name of the generic resource
+- resourceValue – The amount of the generic resource
+
+Example of Usage:
+```
+    SpoutDeclarer s1 = builder.setSpout("word", new TestWordSpout(), 10);
+    s1.addResource("gpu.count", 1.0);
+```
+
+### Specifying Generic Cluster Resources
+
+A Storm administrator can specify node resource availability by modifying the _conf/storm.yaml_ file located in the storm home directory of that node.
+```
+    supervisor.resources.map: {[type] : [amount]}
+```
+Example of Usage:
+```
+    supervisor.resources.map: {"gpu.count" : 2.0}
+```
+
+### Generic Resources in UI
+
+![Storm Cluster UI](images/storm_ui.png)
diff --git a/docs/Guaranteeing-message-processing.md b/docs/Guaranteeing-message-processing.md
new file mode 100644
index 00000000000..4c2314cd9f9
--- /dev/null
+++ b/docs/Guaranteeing-message-processing.md
@@ -0,0 +1,181 @@
+---
+title: Guaranteeing Message Processing
+layout: documentation
+documentation: true
+---
+Storm offers several different levels of guaranteed message processing, including best effort, at least once, and exactly once through [Trident](Trident-tutorial.html).
+This page describes how Storm can guarantee at-least-once processing.
+
+### What does it mean for a message to be "fully processed"?
+
+A tuple coming off a spout can trigger thousands of tuples to be created based on it. Consider, for example, the streaming word count topology:
+
+```java
+TopologyBuilder builder = new TopologyBuilder();
+builder.setSpout("sentences", new KestrelSpout("kestrel.backtype.com",
+                                               22133,
+                                               "sentence_queue",
+                                               new StringScheme()));
+builder.setBolt("split", new SplitSentence(), 10)
+       .shuffleGrouping("sentences");
+builder.setBolt("count", new WordCount(), 20)
+       .fieldsGrouping("split", new Fields("word"));
+```
+
+This topology reads sentences off of a Kestrel queue, splits the sentences into their constituent words, and then emits for each word the number of times it has seen that word before. A tuple coming off the spout triggers many tuples being created based on it: a tuple for each word in the sentence and a tuple for the updated count for each word. The tree of messages looks something like this:
+
+![Tuple tree](images/tuple_tree.png)
+
+Storm considers a tuple coming off a spout "fully processed" when the tuple tree has been exhausted and every message in the tree has been processed. A tuple is considered failed when its tree of messages fails to be fully processed within a specified timeout. This timeout can be configured on a topology-specific basis using the [Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS](javadocs/org/apache/storm/Config.html#TOPOLOGY_MESSAGE_TIMEOUT_SECS) configuration and defaults to 30 seconds.
+
+### What happens if a message is fully processed or fails to be fully processed?
+
+To understand this question, let's take a look at the lifecycle of a tuple coming off of a spout.
+For reference, here is the interface that spouts implement (see the [Javadoc](javadocs/org/apache/storm/spout/ISpout.html) for more information):
+
+```java
+public interface ISpout extends Serializable {
+    void open(Map conf, TopologyContext context, SpoutOutputCollector collector);
+    void close();
+    void nextTuple();
+    void ack(Object msgId);
+    void fail(Object msgId);
+}
+```
+
+First, Storm requests a tuple from the `Spout` by calling the `nextTuple` method on the `Spout`. The `Spout` uses the `SpoutOutputCollector` provided in the `open` method to emit a tuple to one of its output streams. When emitting a tuple, the `Spout` provides a "message id" that will be used to identify the tuple later. For example, the `KestrelSpout` reads a message off of the kestrel queue and emits as the "message id" the id provided by Kestrel for the message. Emitting a message to the `SpoutOutputCollector` looks like this:
+
+```java
+_collector.emit(new Values("field1", "field2", 3), msgId);
+```
+
+Next, the tuple gets sent to consuming bolts and Storm takes care of tracking the tree of messages that is created. If Storm detects that a tuple is fully processed, Storm will call the `ack` method on the originating `Spout` task with the message id that the `Spout` provided to Storm. Likewise, if the tuple times out Storm will call the `fail` method on the `Spout`. Note that a tuple will be acked or failed by the exact same `Spout` task that created it. So even if a `Spout` is executing many tasks across the cluster, a tuple won't be acked or failed by a different task than the one that created it.
+
+Let's use `KestrelSpout` again to see what a `Spout` needs to do to guarantee message processing. When `KestrelSpout` takes a message off the Kestrel queue, it "opens" the message. This means the message is not actually taken off the queue yet, but instead placed in a "pending" state waiting for acknowledgement that the message is completed. While in the pending state, a message will not be sent to other consumers of the queue. Additionally, if a client disconnects, all pending messages for that client are put back on the queue. When a message is opened, Kestrel provides the client with the data for the message as well as a unique id for the message. The `KestrelSpout` uses that exact id as the "message id" for the tuple when emitting the tuple to the `SpoutOutputCollector`. Sometime later on, when `ack` or `fail` are called on the `KestrelSpout`, the `KestrelSpout` sends an ack or fail message to Kestrel with the message id to take the message off the queue or have it put back on.
+
+### What is Storm's reliability API?
+
+There are two things you have to do as a user to benefit from Storm's reliability capabilities. First, you need to tell Storm whenever you're creating a new link in the tree of tuples. Second, you need to tell Storm when you have finished processing an individual tuple. By doing both these things, Storm can detect when the tree of tuples is fully processed and can ack or fail the spout tuple appropriately. Storm's API provides a concise way of doing both of these tasks.
+
+Specifying a link in the tuple tree is called _anchoring_. Anchoring is done at the same time you emit a new tuple. Let's use the following bolt as an example.
+This bolt splits a tuple containing a sentence into a tuple for each word:
+
+```java
+public class SplitSentence extends BaseRichBolt {
+    OutputCollector _collector;
+
+    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+        _collector = collector;
+    }
+
+    public void execute(Tuple tuple) {
+        String sentence = tuple.getString(0);
+        for(String word: sentence.split(" ")) {
+            _collector.emit(tuple, new Values(word));
+        }
+        _collector.ack(tuple);
+    }
+
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("word"));
+    }
+}
+```
+
+Each word tuple is _anchored_ by specifying the input tuple as the first argument to `emit`. Since the word tuple is anchored, the spout tuple at the root of the tree will be replayed later on if the word tuple failed to be processed downstream. In contrast, let's look at what happens if the word tuple is emitted like this:
+
+```java
+_collector.emit(new Values(word));
+```
+
+Emitting the word tuple this way causes it to be _unanchored_. If the tuple fails to be processed downstream, the root tuple will not be replayed. Depending on the fault-tolerance guarantees you need in your topology, sometimes it's appropriate to emit an unanchored tuple.
+
+An output tuple can be anchored to more than one input tuple. This is useful when doing streaming joins or aggregations. A multi-anchored tuple failing to be processed will cause multiple tuples to be replayed from the spouts. Multi-anchoring is done by specifying a list of tuples rather than just a single tuple. For example:
+
+```java
+List<Tuple> anchors = new ArrayList<Tuple>();
+anchors.add(tuple1);
+anchors.add(tuple2);
+_collector.emit(anchors, new Values(1, 2, 3));
+```
+
+Multi-anchoring adds the output tuple into multiple tuple trees. Note that it's also possible for multi-anchoring to break the tree structure and create tuple DAGs, like so:
+
+![Tuple DAG](images/tuple-dag.png)
+
+Storm's implementation works for DAGs as well as trees (pre-release it only worked for trees, and the name "tuple tree" stuck).
+
+Anchoring is how you specify the tuple tree -- the next and final piece to Storm's reliability API is specifying when you've finished processing an individual tuple in the tuple tree. This is done by using the `ack` and `fail` methods on the `OutputCollector`. If you look back at the `SplitSentence` example, you can see that the input tuple is acked after all the word tuples are emitted.
+
+You can use the `fail` method on the `OutputCollector` to immediately fail the spout tuple at the root of the tuple tree. For example, your application may choose to catch an exception from a database client and explicitly fail the input tuple. By failing the tuple explicitly, the spout tuple can be replayed faster than if you waited for the tuple to time out.
+
+Every tuple you process must be acked or failed. Storm uses memory to track each tuple, so if you don't ack/fail every tuple, the task will eventually run out of memory.
+
+A lot of bolts follow a common pattern of reading an input tuple, emitting tuples based on it, and then acking the tuple at the end of the `execute` method. These bolts fall into the categories of filters and simple functions. Storm has an interface called `BasicBolt` that encapsulates this pattern for you.
+The `SplitSentence` example can be written as a `BasicBolt` as follows:
+
+```java
+public class SplitSentence extends BaseBasicBolt {
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+        String sentence = tuple.getString(0);
+        for(String word: sentence.split(" ")) {
+            collector.emit(new Values(word));
+        }
+    }
+
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("word"));
+    }
+}
+```
+
+This implementation is simpler than the implementation from before and is semantically identical. Tuples emitted to `BasicOutputCollector` are automatically anchored to the input tuple, and the input tuple is acked for you automatically when the execute method completes.
+
+In contrast, bolts that do aggregations or joins may delay acking a tuple until after it has computed a result based on a bunch of tuples. Aggregations and joins will commonly multi-anchor their output tuples as well. These things fall outside the simpler pattern of `IBasicBolt`.
+
+### How do I make my applications work correctly given that tuples can be replayed?
+
+As always in software design, the answer is "it depends." If you really want exactly-once semantics, use the [Trident](Trident-tutorial.html) API. In some cases, as with a lot of analytics, dropping data is OK, so disabling fault tolerance by setting the number of acker bolts to 0 via [Config.TOPOLOGY_ACKERS](javadocs/org/apache/storm/Config.html#TOPOLOGY_ACKERS) is fine. But in some cases you want to be sure that everything was processed at least once and nothing was dropped. This is especially useful if all operations are idempotent or if deduping can happen afterwards.
+
+### How does Storm implement reliability in an efficient way?
+
+A Storm topology has a set of special "acker" tasks that track the DAG of tuples for every spout tuple. When an acker sees that a DAG is complete, it sends a message to the spout task that created the spout tuple to ack the message. You can set the number of acker tasks for a topology in the topology configuration using [Config.TOPOLOGY_ACKERS](javadocs/org/apache/storm/Config.html#TOPOLOGY_ACKERS). Storm defaults TOPOLOGY_ACKERS to one task per worker.
+
+The best way to understand Storm's reliability implementation is to look at the lifecycle of tuples and tuple DAGs. When a tuple is created in a topology, whether in a spout or a bolt, it is given a random 64 bit id. These ids are used by ackers to track the tuple DAG for every spout tuple.
+
+Every tuple knows the ids of all the spout tuples for which it exists in their tuple trees. When you emit a new tuple in a bolt, the spout tuple ids from the tuple's anchors are copied into the new tuple. When a tuple is acked, it sends a message to the appropriate acker tasks with information about how the tuple tree changed. In particular it tells the acker "I am now completed within the tree for this spout tuple, and here are the new tuples in the tree that were anchored to me".
+
+For example, if tuples "D" and "E" were created based on tuple "C", here's how the tuple tree changes when "C" is acked:
+
+![What happens on an ack](images/ack_tree.png)
+
+Since "C" is removed from the tree at the same time that "D" and "E" are added to it, the tree can never be prematurely completed.
+
+There are a few more details to how Storm tracks tuple trees. As mentioned already, you can have an arbitrary number of acker tasks in a topology.
+This leads to the following question: when a tuple is acked in the topology, how does it know to which acker task to send that information?
+
+Storm uses mod hashing to map a spout tuple id to an acker task. Since every tuple carries with it the spout tuple ids of all the trees it exists within, it knows which acker tasks to communicate with.
+
+Another detail of Storm is how the acker tasks track which spout tasks are responsible for each spout tuple they're tracking. When a spout task emits a new tuple, it simply sends a message to the appropriate acker telling it that its task id is responsible for that spout tuple. Then when an acker sees a tree has been completed, it knows to which task id to send the completion message.
+
+Acker tasks do not track the tree of tuples explicitly. For large tuple trees with tens of thousands of nodes (or more), tracking all the tuple trees could overwhelm the memory used by the ackers. Instead, the ackers take a different strategy that only requires a fixed amount of space per spout tuple (about 20 bytes). This tracking algorithm is the key to how Storm works and is one of its major breakthroughs.
+
+An acker task stores a map from a spout tuple id to a pair of values. The first value is the task id that created the spout tuple, which is used later on to send completion messages. The second value is a 64 bit number called the "ack val". The ack val is a representation of the state of the entire tuple tree, no matter how big or how small. It is simply the xor of all tuple ids that have been created and/or acked in the tree.
+
+When an acker task sees that an "ack val" has become 0, then it knows that the tuple tree is completed. Since tuple ids are random 64 bit numbers, the chances of an "ack val" accidentally becoming 0 are extremely small. If you work the math, at 10K acks per second, it will take 50,000,000 years until a mistake is made. And even then, it will only cause data loss if that tuple happens to fail in the topology.
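+To see why the xor trick works, here is a toy illustration of the bookkeeping in plain Java (this is not Storm's acker code; the names are ours). Every id is xored in once when its tuple is created and once when it is acked, so the running value returns to zero exactly when the whole tree has been acked:
+
+```java
+java.util.Random rng = new java.util.Random();
+long tupleB = rng.nextLong();  // random 64 bit id assigned when tuple B is created
+long tupleC = rng.nextLong();  // random 64 bit id assigned when tuple C is created
+
+long ackVal = 0;
+ackVal ^= tupleB;    // B created
+ackVal ^= tupleC;    // C created (anchored in the same tree)
+ackVal ^= tupleB;    // B acked: xoring the same id a second time cancels it out
+ackVal ^= tupleC;    // C acked
+assert ackVal == 0;  // every id appeared exactly twice, so the tree is complete
+```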
+Now that you understand the reliability algorithm, let's go over all the failure cases and see how in each case Storm avoids data loss:
+
+- **A tuple isn't acked because the task died**: In this case the spout tuple ids at the root of the trees for the failed tuple will time out and be replayed.
+- **Acker task dies**: In this case all the spout tuples the acker was tracking will time out and be replayed.
+- **Spout task dies**: In this case the source that the spout talks to is responsible for replaying the messages. For example, queues like Kestrel and RabbitMQ will place all pending messages back on the queue when a client disconnects.
+
+As you have seen, Storm's reliability mechanisms are completely distributed, scalable, and fault-tolerant.
+
+### Tuning reliability
+
+Acker tasks are lightweight, so you don't need very many of them in a topology. You can track their performance through the Storm UI (component id "__acker"). If the throughput doesn't look right, you'll need to add more acker tasks.
+
+If reliability isn't important to you -- that is, you don't care about losing tuples in failure situations -- then you can improve performance by not tracking the tuple tree for spout tuples. Not tracking a tuple tree halves the number of messages transferred, since normally there's an ack message for every tuple in the tuple tree. Additionally, it requires fewer ids to be kept in each downstream tuple, reducing bandwidth usage.
+
+There are three ways to remove reliability. The first is to set Config.TOPOLOGY_ACKERS to 0. In this case, Storm will call the `ack` method on the spout immediately after the spout emits a tuple. The tuple tree won't be tracked.
+
+The second way is to remove reliability on a message-by-message basis. You can turn off tracking for an individual spout tuple by omitting a message id in the `SpoutOutputCollector.emit` method.
+
+Finally, if you don't care if a particular subset of the tuples downstream in the topology fails to be processed, you can emit them as unanchored tuples. Since they're not anchored to any spout tuples, they won't cause any spout tuples to fail if they aren't acked.
diff --git a/docs/Hooks.md b/docs/Hooks.md
new file mode 100644
index 00000000000..c88a5632cc8
--- /dev/null
+++ b/docs/Hooks.md
@@ -0,0 +1,26 @@
+---
+title: Hooks
+layout: documentation
+documentation: true
+---
+## Task hooks
+Storm provides hooks with which you can insert custom code to run on any number of events within Storm. You create a hook by extending the [BaseTaskHook](javadocs/org/apache/storm/hooks/BaseTaskHook.html) class and overriding the appropriate method for the event you want to catch (a minimal sketch appears at the end of this page). There are two ways to register your hook:
+
+1. In the open method of your spout or the prepare method of your bolt, using the [TopologyContext](javadocs/org/apache/storm/task/TopologyContext.html#addTaskHook) method.
+2. Through the Storm configuration, using the ["topology.auto.task.hooks"](javadocs/org/apache/storm/Config.html#TOPOLOGY_AUTO_TASK_HOOKS) config. These hooks are automatically registered in every spout or bolt, and are useful for doing things like integrating with a custom monitoring system.
+
+## Worker hooks
+Storm also provides worker-level hooks that are called during worker startup, before any bolts or spouts are prepared/opened. You can create such a hook by extending [BaseWorkerHook](javadocs/org/apache/storm/hooks/BaseWorkerHook.html) (an implementation of [IWorkerHook](javadocs/org/apache/storm/hooks/IWorkerHook.html)) and overriding the methods you want to implement. You can register your hook via `TopologyBuilder.addWorkerHook`.
+The `IWorkerHook#start(Map, WorkerUserContext)` lifecycle method exposes [WorkerUserContext](javadocs/org/apache/storm/hooks/IWorkerHook.html), which provides a way to set application-level common resources via the `setResource(String, Object)` method. Such a resource can then be retrieved by tasks, both spouts (via `open(Map, TopologyContext, SpoutOutputCollector)`) and bolts (via `prepare(Map, TopologyContext, OutputCollector)`), by calling `TopologyContext#getResource(String)`.
+
+## Shared State amongst components and hooks
+Storm provides ways to share resources across different components in the following ways:
+1. taskData: this pertains to task-level data and can be written and read by a task and its task hooks in their corresponding lifecycle methods (`open` for a spout, `prepare` for a bolt or task hook).
+   1. write access: `TopologyContext#setTaskData(String, Object)`
+   2. read access: `TopologyContext#getTaskData(String)`
+2. executorData: this pertains to executor-level data and is shared across the tasks and task hooks managed by the concerned executor. As above, it is accessible to spouts via the `open` and to bolts and task hooks via the `prepare` lifecycle method.
+   1. write access: `TopologyContext#setExecutorData`
+   2. read access: `TopologyContext#getExecutorData(String)`
+3. userResources: this pertains to worker-level data and is shared across the executors, tasks, worker hooks and task hooks managed by the concerned worker. Unlike the others, it can only be written by worker hooks.
+   1. write access: `WorkerUserContext#setResource(String, Object)`
+   2. read access: `WorkerTopologyContext#getResource(String)` or `TopologyContext#getResource(String)`
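+To make the task-hook mechanics above concrete, here is a minimal sketch of a hook that counts acked tuples; the class name and counter are ours, not part of Storm:
+
+```java
+import org.apache.storm.hooks.BaseTaskHook;
+import org.apache.storm.hooks.info.BoltAckInfo;
+
+public class AckCountingHook extends BaseTaskHook {
+    private long acked = 0;
+
+    @Override
+    public void boltAck(BoltAckInfo info) {
+        acked++; // invoked each time this task acks a tuple
+    }
+}
+```
+
+It could be registered from a bolt's `prepare` method via `context.addTaskHook(new AckCountingHook())`, or in every component via the "topology.auto.task.hooks" config.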
diff --git a/docs/IConfigLoader.md b/docs/IConfigLoader.md
new file mode 100644
index 00000000000..3c2f4de1ed9
--- /dev/null
+++ b/docs/IConfigLoader.md
@@ -0,0 +1,51 @@
+---
+title: IConfigLoader
+layout: documentation
+documentation: true
+---
+
+
+### Introduction
+IConfigLoader is an interface designed to allow dynamic loading of scheduler resource constraints. Currently, the MultiTenant scheduler uses this interface to dynamically load the number of isolated nodes a given user has been guaranteed, and the ResourceAwareScheduler uses the interface to dynamically load per-user resource guarantees.
+
+The following interface is provided for users to create an IConfigLoader instance based on the scheme of the `scheduler.config.loader.uri`.
+```
+ConfigLoaderFactoryService.createConfigLoader(Map conf)
+```
+
+------
+
+### Interface
+```
+public interface IConfigLoader {
+    Map load();
+};
+```
+#### Description
+ - `load` is called by the scheduler whenever it wishes to retrieve the most recent configuration map.
+
+#### Loader Configuration
+The loaders are dynamically selected and dynamically configured through configuration items in the scheduler implementations.
+
+##### Example
+```
+scheduler.config.loader.uri: "artifactory+http://artifactory.my.company.com:8000/artifactory/configurations/clusters/my_cluster/ras_pools"
+scheduler.config.loader.timeout.secs: 30
+```
+Or
+```
+scheduler.config.loader.uri: "file:///path/to/my/config.yaml"
+```
+### Implementations
+
+There are currently two implementations of IConfigLoader:
+ - org.apache.storm.scheduler.utils.ArtifactoryConfigLoader: loads configurations from an Artifactory server.
+   It is used if users prepend `artifactory+` to the scheme of the real URI in `scheduler.config.loader.uri`.
+ - org.apache.storm.scheduler.utils.FileConfigLoader: loads configurations from a local file. It is used if users use the `file` scheme.
+
+#### Configurations
+ - scheduler.config.loader.uri: For `ArtifactoryConfigLoader`, this can either be a reference to an individual file in Artifactory or to a directory. If it is a directory, the file with the lexicographically largest name will be returned.
+   For `FileConfigLoader`, this is the URI pointing to a file.
+ - scheduler.config.loader.timeout.secs: Currently only used in `ArtifactoryConfigLoader`. It is the amount of time an http connection to the artifactory server will wait before timing out. The default is 10.
+ - scheduler.config.loader.polltime.secs: Currently only used in `ArtifactoryConfigLoader`. It is the frequency at which the plugin will call out to artifactory instead of returning the most recently cached result. The default is 600 seconds.
+ - scheduler.config.loader.artifactory.base.directory: Only used in `ArtifactoryConfigLoader`. It is the part of the uri, configurable in Artifactory, which represents the top of the directory tree. It defaults to "/artifactory".
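+As a rough sketch, a file-based loader in the spirit of `FileConfigLoader` could look like the following. This is simplified and assumes SnakeYAML is on the classpath; the bundled implementations handle URI parsing, caching and errors:
+
+```java
+import java.io.FileReader;
+import java.util.Map;
+
+import org.apache.storm.scheduler.utils.IConfigLoader;
+import org.yaml.snakeyaml.Yaml;
+
+public class MyFileConfigLoader implements IConfigLoader {
+    // In the real loaders this path comes from scheduler.config.loader.uri.
+    private final String path = "/path/to/my/config.yaml";
+
+    @Override
+    public Map load() {
+        try (FileReader reader = new FileReader(path)) {
+            return (Map) new Yaml().load(reader); // the YAML document is the configuration map
+        } catch (Exception e) {
+            return null; // callers must be prepared to handle a null result
+        }
+    }
+}
+```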
\ No newline at end of file diff --git a/docs/Implementation-docs.md b/docs/Implementation-docs.md new file mode 100644 index 00000000000..46fe88cd38a --- /dev/null +++ b/docs/Implementation-docs.md @@ -0,0 +1,13 @@ +--- +title: Storm Internal Implementation +layout: documentation +documentation: true +--- +This section of the wiki is dedicated to explaining how Storm is implemented. You should have a good grasp of how to use Storm before reading these sections. + +- [Structure of the codebase](Structure-of-the-codebase.html) +- [Lifecycle of a topology](Lifecycle-of-a-topology.html) +- [Message passing implementation](Message-passing-implementation.html) +- [Acking framework implementation](Acking-framework-implementation.html) +- [Metrics](Metrics.html) +- [Nimbus HA](nimbus-ha-design.html) diff --git a/docs/Installing-native-dependencies.md b/docs/Installing-native-dependencies.md new file mode 100644 index 00000000000..1937d4bffcf --- /dev/null +++ b/docs/Installing-native-dependencies.md @@ -0,0 +1,38 @@ +--- +layout: documentation +--- +The native dependencies are only needed on actual Storm clusters. When running Storm in local mode, Storm uses a pure Java messaging system so that you don't need to install native dependencies on your development machine. + +Installing ZeroMQ and JZMQ is usually straightforward. Sometimes, however, people run into issues with autoconf and get strange errors. If you run into any issues, please email the [Storm mailing list](http://groups.google.com/group/storm-user) or come get help in the #storm-user room on freenode. + +Storm has been tested with ZeroMQ 2.1.7, and this is the recommended ZeroMQ release that you install. You can download a ZeroMQ release [here](http://download.zeromq.org/). Installing ZeroMQ should look something like this: + +``` +wget http://download.zeromq.org/zeromq-2.1.7.tar.gz +tar -xzf zeromq-2.1.7.tar.gz +cd zeromq-2.1.7 +./configure +make +sudo make install +``` + +JZMQ is the Java bindings for ZeroMQ. JZMQ doesn't have any releases (we're working with them on that), so there is risk of a regression if you always install from the master branch. To prevent a regression from happening, you should instead install from [this fork](http://github.com/nathanmarz/jzmq) which is tested to work with Storm. Installing JZMQ should look something like this: + +``` +#install jzmq +git clone https://github.com/nathanmarz/jzmq.git +cd jzmq +./autogen.sh +./configure +make +sudo make install +``` + +To get the JZMQ build to work, you may need to do one or all of the following: + +1. Set JAVA_HOME environment variable appropriately +2. Install Java dev package (more info [here](http://codeslinger.posterous.com/getting-zeromq-and-jzmq-running-on-mac-os-x) for Mac OSX users) +3. Upgrade autoconf on your machine +4. Follow the instructions in [this blog post](http://blog.pmorelli.com/getting-zeromq-and-jzmq-running-on-mac-os-x) + +If you run into any errors when running `./configure`, [this thread](http://stackoverflow.com/questions/3522248/how-do-i-compile-jzmq-for-zeromq-on-osx) may provide a solution. diff --git a/docs/Joins.md b/docs/Joins.md new file mode 100644 index 00000000000..9efb7c68f17 --- /dev/null +++ b/docs/Joins.md @@ -0,0 +1,139 @@ +--- +title: Joining Streams in Storm Core +layout: documentation +documentation: true +--- + +Storm core supports joining multiple data streams into one with the help of `JoinBolt`. +`JoinBolt` is a Windowed bolt, i.e. 
it waits for the configured window duration to match up the
+tuples among the streams being joined. This helps align the streams within a window boundary.
+
+Each of `JoinBolt`'s incoming data streams must be Fields Grouped on a single field. A stream
+should only be joined with the other streams using the field on which it has been FieldsGrouped.
+Knowing this will help in understanding the join syntax described below.
+
+## Performing Joins
+Consider the following SQL join involving 4 tables:
+
+```sql
+select userId, key4, key2, key3
+from table1
+inner join table2 on table2.userId = table1.key1
+inner join table3 on table3.key3 = table2.userId
+left join table4 on table4.key4 = table3.key3
+```
+
+Similar joins could be expressed on tuples generated by 4 spouts using `JoinBolt`:
+
+```java
+JoinBolt jbolt = new JoinBolt("spout1", "key1")      // from spout1
+    .join     ("spout2", "userId", "spout1")         // inner join spout2 on spout2.userId = spout1.key1
+    .join     ("spout3", "key3",   "spout2")         // inner join spout3 on spout3.key3 = spout2.userId
+    .leftJoin ("spout4", "key4",   "spout3")         // left join spout4 on spout4.key4 = spout3.key3
+    .select   ("userId, key4, key2, spout3:key3")    // choose output fields
+    .withTumblingWindow( new Duration(10, TimeUnit.MINUTES) ) ;
+
+topoBuilder.setBolt("joiner", jbolt, 1)
+    .fieldsGrouping("spout1", new Fields("key1") )
+    .fieldsGrouping("spout2", new Fields("userId") )
+    .fieldsGrouping("spout3", new Fields("key3") )
+    .fieldsGrouping("spout4", new Fields("key4") );
+```
+
+The bolt constructor takes two arguments. The 1st argument introduces the data from `spout1`
+to be the first stream and specifies that it will always use field `key1` when joining this with the other streams.
+The name of the component specified must refer to the spout or bolt that is directly connected to the Join bolt.
+Here, data received from `spout1` must be fields grouped on `key1`. Similarly, each of the `leftJoin()` and `join()` method
+calls introduces a new stream along with the field to use for the join. As seen in the above example, the same FieldsGrouping
+requirement applies to these streams as well. The 3rd argument to the join methods refers to another stream with which
+to join.
+
+The `select()` method is used to specify the output fields. The argument to `select` is a comma-separated list of fields.
+Individual field names can be prefixed with a stream name to disambiguate between the same field name occurring in
+multiple streams, as follows: `.select("spout3:key3, spout4:key3")`. Nested tuple types are supported if the
+nesting has been done using `Map`s. For example `outer.inner.innermost` refers to a field that is nested three levels
+deep, where `outer` and `inner` are of type `Map`.
+
+A stream name prefix is not allowed for the fields in any of the join() arguments, but nested fields are supported.
+
+The call to `withTumblingWindow()` above configures the join window to be a 10 minute tumbling window. Since `JoinBolt`
+is a Windowed Bolt, we can also use the `withWindow` method to configure it as a sliding window (see the tips section below).
+
+## Stream names and Join order
+* Stream names must be introduced (in the constructor or as the 1st arg to the various join methods) before being referred
+to (in the 3rd argument of the join methods). Forward referencing of stream names, as shown below, is not allowed:
+
+```java
+new JoinBolt( "spout1", "key1")
+    .join ( "spout2", "userId", "spout3")  // not allowed: spout3 not yet introduced
+    .join ( "spout3", "key3", "spout1")
+```
+* Internally, the joins will be performed in the order expressed by the user.
+
+## Joining based on Stream names
+
+For simplicity, Storm topologies often use the `default` stream. Topologies can also use named streams
+instead of `default` streams. To support such topologies, `JoinBolt` can be configured to use stream
+names, instead of source component (spout/bolt) names, via the constructor's first argument:
+
+```java
+new JoinBolt(JoinBolt.Selector.STREAM, "stream1", "key1")
+    .join("stream2", "key2")
+    ...
+```
+The first argument `JoinBolt.Selector.STREAM` informs the bolt that `stream1/2/3/4` refer to named streams
+(as opposed to names of upstream spouts/bolts).
+
+The below example joins two named streams coming from four bolts:
+
+```java
+JoinBolt jbolt = new JoinBolt(JoinBolt.Selector.STREAM, "stream1", "key1")
+    .join   ("stream2", "userId", "stream1" )
+    .select ("userId, key1, key2")
+    .withTumblingWindow( new Duration(10, TimeUnit.MINUTES) ) ;
+
+topoBuilder.setBolt("joiner", jbolt, 1)
+    .fieldsGrouping("bolt1", "stream1", new Fields("key1") )
+    .fieldsGrouping("bolt2", "stream1", new Fields("key1") )
+    .fieldsGrouping("bolt3", "stream2", new Fields("userId") )
+    .fieldsGrouping("bolt4", "stream1", new Fields("key1") );
+```
+
+In the above example, `bolt1` may be emitting other streams as well, but the join bolt
+is only subscribing to `stream1` & `stream2` from the different bolts. `stream1` from `bolt1`, `bolt2` and `bolt4`
+is treated as a single stream and joined against `stream2` from `bolt3`.
+
+## Limitations:
+1. Currently only INNER and LEFT joins are supported.
+
+2. Unlike SQL, which allows the same table to be joined to different tables on different keys, here the same one field must be used
+   on a stream. Fields Grouping ensures the right tuples are routed to the right instances of a Join Bolt. Consequently the
+   FieldsGrouping field must be the same as the join field for correct results. To perform joins on multiple fields, the fields
+   can be combined into one field and then sent to the Join bolt.
+
+
+## Tips:
+
+1. Joins can be CPU and memory intensive. The larger the data accumulated in the current window (proportional to the window
+   length), the longer it takes to do the join. Having a short sliding interval (a few seconds, for example) triggers frequent
+   joins. Consequently performance can suffer if using large window lengths, small sliding intervals, or both.
+
+2. Duplication of joined records across windows is possible when using Sliding Windows. This is because the tuples continue to exist
+   across multiple windows when using Sliding Windows.
+
+3. If message timeouts are enabled, ensure the timeout setting (topology.message.timeout.secs) is large enough to comfortably
+   accommodate the window size, plus the additional processing by other spouts and bolts.
+
+4. Joining a window of two streams with M and N elements each, *in the worst case*, can produce MxN elements, with every output tuple
+   anchored to one tuple from each input stream. This can mean a lot of output tuples from JoinBolt and even more ACKs for downstream bolts
+   to emit. This can place substantial pressure on the messaging system and dramatically slow down the topology if care is not taken.
+   To manage the load on the messaging subsystem, it is advisable to (see the sketch after this list):
+   * Increase the worker's heap (topology.worker.max.heap.size.mb).
+   * **If** ACKing is not necessary for your topology, disable ACKers (topology.acker.executors=0).
+   * Disable the event logger (topology.eventlogger.executors=0).
+   * Turn off topology debugging (topology.debug=false).
+   * Set topology.max.spout.pending to a value large enough to accommodate an estimated full window worth of tuples, plus some more for headroom.
+     This helps mitigate the possibility of spouts emitting excessive tuples when the messaging subsystem is experiencing excessive load. This situation
+     can occur when its value is set to null.
+   * Lastly, keep the window size to the minimum value necessary for solving the problem at hand.
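+As a rough illustration, those settings could be applied on the topology config as follows; the numbers are placeholders to be tuned for your workload:
+
+```java
+import org.apache.storm.Config;
+
+Config conf = new Config();
+conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 2048.0); // larger heap for the join window
+conf.setNumAckers(0);                                      // only if ACKing is truly unnecessary
+conf.put(Config.TOPOLOGY_EVENTLOGGER_EXECUTORS, 0);        // no event logger tasks
+conf.setDebug(false);                                      // no debug logging
+conf.setMaxSpoutPending(5000);                             // about a full window of tuples plus headroom
+```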
diff --git a/docs/Kestrel-and-Storm.md b/docs/Kestrel-and-Storm.md
new file mode 100644
index 00000000000..1efe39768de
--- /dev/null
+++ b/docs/Kestrel-and-Storm.md
@@ -0,0 +1,202 @@
+---
+title: Storm and Kestrel
+layout: documentation
+documentation: true
+---
+This page explains how to use Storm to consume items from a Kestrel cluster.
+
+## Preliminaries
+### Storm
+This tutorial uses examples from the [storm-kestrel](https://github.com/nathanmarz/storm-kestrel) project and the [storm-starter](http://github.com/apache/storm/blob/{{page.version}}/examples/storm-starter) project. It's recommended that you clone those projects and follow along with the examples. Read [Setting up development environment](Setting-up-development-environment.html) and [Creating a new Storm project](Creating-a-new-Storm-project.html) to get your machine set up.
+### Kestrel
+It assumes you are able to run a Kestrel server locally, as described [here](https://github.com/nathanmarz/storm-kestrel).
+
+## Kestrel Server and Queue
+A single kestrel server has a set of queues. A Kestrel queue is a very simple message queue that runs on the JVM and uses the memcache protocol (with some extensions) to talk to clients. For details, look at the implementation of the [KestrelThriftClient](https://github.com/nathanmarz/storm-kestrel/blob/master/src/jvm/org/apache/storm/spout/KestrelThriftClient.java) class provided in the [storm-kestrel](https://github.com/nathanmarz/storm-kestrel) project.
+
+Each queue is strictly ordered following the FIFO (first in, first out) principle. For performance, items are cached in system memory, though only the first 128MB is kept in memory. When the server is stopped, the queue state is stored in a journal file.
+
+Further details can be found [here](https://github.com/nathanmarz/kestrel/blob/master/docs/guide.md).
+
+Kestrel is:
+* fast
+* small
+* durable
+* reliable
+
+For instance, Twitter uses Kestrel as the backbone of its messaging infrastructure, as described [here](http://bhavin.directi.com/notes-on-kestrel-the-open-source-twitter-queue/).
+
+## Add items to Kestrel
+First, we need a program that can add items to a Kestrel queue. The following method makes use of the KestrelClient implementation in [storm-kestrel](https://github.com/nathanmarz/storm-kestrel). It adds sentences, randomly chosen out of an array that holds five possible sentences, into a Kestrel queue.
+
+```
+    private static void queueSentenceItems(KestrelClient kestrelClient, String queueName)
+            throws ParseError, IOException {
+
+        String[] sentences = new String[] {
+            "the cow jumped over the moon",
+            "an apple a day keeps the doctor away",
+            "four score and seven years ago",
+            "snow white and the seven dwarfs",
+            "i am at two with nature"};
+
+        Random _rand = new Random();
+
+        for(int i=1; i<=10; i++){
+
+            String sentence = sentences[_rand.nextInt(sentences.length)];
+
+            String val = "ID " + i + " " + sentence;
+
+            boolean queueSuccess = kestrelClient.queue(queueName, val);
+
+            System.out.println("queueSuccess=" + queueSuccess + " [" + val + "]");
+        }
+    }
+```
+
+## Remove items from Kestrel
+
+This method dequeues items from a queue without removing them.
+
+```
+    private static void dequeueItems(KestrelClient kestrelClient, String queueName)
+            throws IOException, ParseError {
+
+        for(int i=1; i<=12; i++){
+
+            Item item = kestrelClient.dequeue(queueName);
+
+            if(item==null){
+                System.out.println("The queue (" + queueName + ") contains no items.");
+            }
+            else {
+                byte[] data = item._data;
+
+                String receivedVal = new String(data);
+
+                System.out.println("receivedItem=" + receivedVal);
+            }
+        }
+    }
+```
+
+This method dequeues items from a queue and then removes them.
+
+```
+    private static void dequeueAndRemoveItems(KestrelClient kestrelClient, String queueName)
+            throws IOException, ParseError {
+
+        for(int i=1; i<=12; i++){
+
+            Item item = kestrelClient.dequeue(queueName);
+
+            if(item==null){
+                System.out.println("The queue (" + queueName + ") contains no items.");
+            }
+            else {
+                int itemID = item._id;
+
+                byte[] data = item._data;
+
+                String receivedVal = new String(data);
+
+                kestrelClient.ack(queueName, itemID);
+
+                System.out.println("receivedItem=" + receivedVal);
+            }
+        }
+    }
+```
+
+## Add Items continuously to Kestrel
+
+This is our final program to run in order to continuously add sentence items to a queue called **sentence_queue** of a locally running Kestrel server.
+
+To stop it, type a closing bracket char ']' in the console and hit 'Enter'.
+
+```
+    import java.io.IOException;
+    import java.io.InputStream;
+    import java.util.Random;
+
+    import org.apache.storm.spout.KestrelClient;
+    import org.apache.storm.spout.KestrelClient.Item;
+    import org.apache.storm.spout.KestrelClient.ParseError;
+
+    public class AddSentenceItemsToKestrel {
+
+        /**
+         * @param args
+         */
+        public static void main(String[] args) {
+
+            InputStream is = System.in;
+
+            char closing_bracket = ']';
+
+            int val = closing_bracket;
+
+            boolean aux = true;
+
+            try {
+                KestrelClient kestrelClient = null;
+                String queueName = "sentence_queue";
+
+                while(aux){
+
+                    kestrelClient = new KestrelClient("localhost", 22133);
+
+                    queueSentenceItems(kestrelClient, queueName);
+
+                    kestrelClient.close();
+
+                    Thread.sleep(1000);
+
+                    if(is.available() > 0){
+                        if(val == is.read())
+                            aux = false;
+                    }
+                }
+            } catch (IOException e) {
+                e.printStackTrace();
+            } catch (ParseError e) {
+                e.printStackTrace();
+            } catch (InterruptedException e) {
+                e.printStackTrace();
+            }
+
+            System.out.println("end");
+        }
+    }
+```
+## Using KestrelSpout
+
+This topology reads sentences off of a Kestrel queue using KestrelSpout, splits the sentences into their constituent words (Bolt: SplitSentence), and then emits for each word the number of times it has seen that word before (Bolt: WordCount).
How data is processed is described in detail in [Guaranteeing message processing](Guaranteeing-message-processing.html).
+
+```
+    TopologyBuilder builder = new TopologyBuilder();
+    builder.setSpout("sentences", new KestrelSpout("localhost",22133,"sentence_queue",new StringScheme()));
+    builder.setBolt("split", new SplitSentence(), 10)
+            .shuffleGrouping("sentences");
+    builder.setBolt("count", new WordCount(), 20)
+            .fieldsGrouping("split", new Fields("word"));
+```
+
+## Execution
+
+First, start your local Kestrel server in production or development mode.
+
+Then, wait about 5 seconds in order to avoid a ConnectException.
+
+Now execute the program to add items to the queue and launch the Storm topology. The order in which you launch the programs does not matter.
+
+If you run the topology with TOPOLOGY_DEBUG you should see tuples being emitted in the topology.
diff --git a/docs/Lifecycle-of-a-topology.md b/docs/Lifecycle-of-a-topology.md
new file mode 100644
index 00000000000..fe785f1e4c3
--- /dev/null
+++ b/docs/Lifecycle-of-a-topology.md
@@ -0,0 +1,82 @@
+---
+title: Lifecycle of a Storm Topology
+layout: documentation
+documentation: true
+---
+(**NOTE**: this page is based on the 0.7.1 code; many things have changed since then, including a split between tasks and executors, and a reorganization of the code under `storm-client/src` rather than `src/`.)
+
+This page explains in detail the lifecycle of a topology from running the "storm jar" command to uploading the topology to Nimbus to the supervisors starting/stopping workers to workers and tasks setting themselves up. It also explains how Nimbus monitors topologies and how topologies are shut down when they are killed.
+
+First a couple of important notes about topologies:
+
+1. The actual topology that runs is different from the topology the user specifies. The actual topology has implicit streams and an implicit "acker" bolt added to manage the acking framework (used to guarantee data processing). The implicit topology is created via the [system-topology!](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/common.clj#L188) function.
+2. `system-topology!` is used in two places:
+    - when Nimbus is creating tasks for the topology [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/nimbus.clj#L316)
+    - in the worker so it knows where it needs to route messages to [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/worker.clj#L90)
+
+## Starting a topology
+
+- "storm jar" command executes your class with the specified arguments. The only special thing that "storm jar" does is set the "storm.jar" environment variable for use by `StormSubmitter` later. [code](https://github.com/apache/storm/blob/0.7.1/bin/storm#L101)
+- When your code uses `StormSubmitter.submitTopology`, `StormSubmitter` takes the following actions:
+    - First, `StormSubmitter` uploads the jar if it hasn't been uploaded before.
[code](https://github.com/apache/storm/blob/0.7.1/src/jvm/org/apache/storm/StormSubmitter.java#L83)
+        - Jar uploading is done via Nimbus's Thrift interface [code](https://github.com/apache/storm/blob/0.7.1/src/storm.thrift#L200)
+        - `beginFileUpload` returns a path in Nimbus's inbox
+        - 15 kilobytes are uploaded at a time through `uploadChunk`
+        - `finishFileUpload` is called when it's finished uploading
+        - Here is Nimbus's implementation of those Thrift methods: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/nimbus.clj#L694)
+    - Second, `StormSubmitter` calls `submitTopology` on the Nimbus thrift interface [code](https://github.com/apache/storm/blob/0.7.1/src/jvm/org/apache/storm/StormSubmitter.java#L60)
+    - The topology config is serialized using JSON (JSON is used so that writing DSLs in any language is as easy as possible)
+    - Notice that the Thrift `submitTopology` call takes in the Nimbus inbox path where the jar was uploaded
+
+- Nimbus receives the topology submission. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/nimbus.clj#L639)
+- Nimbus normalizes the topology configuration. The main purpose of normalization is to ensure that every single task will have the same serialization registrations, which is critical for getting serialization working correctly. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/nimbus.clj#L557)
+- Nimbus sets up the static state for the topology [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/nimbus.clj#L661)
+    - Jars and configs are kept on the local filesystem because they're too big for Zookeeper. The jar and configs are copied into the path {nimbus local dir}/stormdist/{topology id}
+    - `setup-storm-static` writes task -> component mapping into ZK
+    - `setup-heartbeats` creates a ZK "directory" in which tasks can heartbeat
+- Nimbus calls `mk-assignments` to assign tasks to machines [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/nimbus.clj#L458)
+    - Assignment record definition is here: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/common.clj#L25)
+    - Assignment contains:
+        - `master-code-dir`: used by supervisors to download the correct jars/configs for the topology from Nimbus
+        - `task->node+port`: Map from a task id to the worker that task should be running on. (A worker is identified by a node/port pair)
+        - `node->host`: A map from node id to hostname. This is used so workers know which machines to connect to in order to communicate with other workers. Node ids are used to identify supervisors so that multiple supervisors can be run on one machine. One place this is done is with Mesos integration.
+        - `task->start-time-secs`: Contains a map from task id to the timestamp at which Nimbus launched that task. This is used by Nimbus when monitoring topologies, as tasks are given a longer timeout to heartbeat when they're first launched (the launch timeout is configured by "nimbus.task.launch.secs" config)
+- Once topologies are assigned, they're initially in a deactivated mode. `start-storm` writes data into Zookeeper so that the cluster knows the topology is active and can start emitting tuples from spouts.
[code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/nimbus.clj#L504)
+
+- TODO cluster state diagram (show all nodes and what's kept everywhere)
+
+- Supervisor runs two functions in the background:
+    - `synchronize-supervisor`: This is called whenever assignments in Zookeeper change and also every 10 seconds. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/supervisor.clj#L241)
+        - Downloads code from Nimbus for topologies assigned to this machine for which it doesn't have the code yet. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/supervisor.clj#L258)
+        - Writes into the local filesystem what this node is supposed to be running. It writes a map from port -> LocalAssignment. LocalAssignment contains a topology id as well as the list of task ids for that worker. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/supervisor.clj#L13)
+    - `sync-processes`: Reads from the local filesystem what `synchronize-supervisor` wrote and compares that to what's actually running on the machine. It then starts/stops worker processes as necessary to synchronize. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/supervisor.clj#L177)
+
+- Worker processes start up through the `mk-worker` function [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/worker.clj#L67)
+    - Worker connects to other workers and starts a thread to monitor for changes. So if a worker gets reassigned, the worker will automatically reconnect to the other worker's new location. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/worker.clj#L123)
+    - Monitors whether a topology is active or not and stores that state in the `storm-active-atom` variable. This variable is used by tasks to determine whether or not to call `nextTuple` on the spouts. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/worker.clj#L155)
+    - The worker launches the actual tasks as threads within it [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/worker.clj#L178)
+- Tasks are set up through the `mk-task` function [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/task.clj#L160)
+    - Tasks set up the routing function, which takes in a stream and an output tuple and returns a list of task ids to send the tuple to [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/task.clj#L207) (there's also a 3-arity version used for direct streams)
+    - Tasks set up the spout-specific or bolt-specific code with [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/task.clj#L241)
+
+## Topology Monitoring
+
+- Nimbus monitors the topology during its lifetime
+    - Schedules a recurring task on the timer thread to check the topologies [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/nimbus.clj#L623)
+    - Nimbus's behavior is represented as a finite state machine [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/nimbus.clj#L98)
+    - The "monitor" event is called on a topology every "nimbus.monitor.freq.secs", which calls `reassign-topology` through `reassign-transition` [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/nimbus.clj#L497)
+    - `reassign-topology` calls `mk-assignments`, the same function used to assign the topology the first time.
`mk-assignments` is also capable of incrementally updating a topology
+        - `mk-assignments` checks heartbeats and reassigns workers as necessary
+        - Any reassignments change the state in ZK, which will trigger supervisors to synchronize and start/stop workers
+
+## Killing a topology
+
+- "storm kill" command runs this code which just calls the Nimbus Thrift interface to kill the topology: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/command/kill_topology.clj)
+- Nimbus receives the kill command [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/nimbus.clj#L671)
+- Nimbus applies the "kill" transition to the topology [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/nimbus.clj#L676)
+- The kill transition function changes the status of the topology to "killed" and schedules the "remove" event to run "wait time seconds" in the future. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/nimbus.clj#L63)
+    - The wait time defaults to the topology message timeout but can be overridden with the -w flag in the "storm kill" command
+    - This causes the topology to be deactivated for the wait time before it's actually shut down. This gives the topology a chance to finish processing what it's currently processing before shutting down the workers
+    - Changing the status during the kill transition ensures that the kill protocol is fault-tolerant to Nimbus crashing. On startup, if the status of the topology is "killed", Nimbus schedules the remove event to run "wait time seconds" in the future [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/nimbus.clj#L111)
+- Removing a topology cleans out the assignment and static information from ZK [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/nimbus.clj#L116)
+- A separate cleanup thread runs the `do-cleanup` function which will clean up the heartbeat dir and the jars/configs stored locally. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/nimbus.clj#L577)
diff --git a/docs/Local-mode.md b/docs/Local-mode.md
new file mode 100644
index 00000000000..3b83d54a681
--- /dev/null
+++ b/docs/Local-mode.md
@@ -0,0 +1,86 @@
+---
+title: Local Mode
+layout: documentation
+documentation: true
+---
+Local mode simulates a Storm cluster in process and is useful for developing and testing topologies. Running topologies in local mode is similar to running topologies [on a cluster](Running-topologies-on-a-production-cluster.html).
+
+To run a topology in local mode you have two options. The most common option is to run your topology with `storm local` instead of `storm jar`.
+
+This will bring up a local simulated cluster and force all interactions with Nimbus to go through the simulated cluster instead of going to a separate process. By default this will run the process for 20 seconds before tearing down the entire cluster. You can override this by including a `--local-ttl` command line option which sets the number of seconds it should run for.
+
+### Programmatic
+
+If you want to do some automated testing without actually launching a Storm cluster, you can use the same classes internally that `storm local` does.
+
+To do this you first need to pull in the dependencies needed to access these classes. For the Java API you should depend on `storm-server` as a `test` dependency.
+
+To create an in-process cluster, simply use the `LocalCluster` class.
For example:
+
+```java
+import org.apache.storm.LocalCluster;
+
+...
+
+try (LocalCluster cluster = new LocalCluster()) {
+    //Interact with the cluster...
+}
+```
+
+You can then submit topologies using the `submitTopology` method on the `LocalCluster` object. Just like the corresponding method on [StormSubmitter](javadocs/org/apache/storm/StormSubmitter.html), `submitTopology` takes a name, a topology configuration, and the topology object. You can then kill a topology using the `killTopology` method which takes the topology name as an argument.
+
+The `LocalCluster` is an `AutoCloseable` and will shut down when close is called.
+
+Many of the Nimbus APIs are also available through the LocalCluster.
+
+### DRPC
+
+DRPC can be run in local mode as well. Here's how to run the above example in local mode:
+
+```java
+try (LocalDRPC drpc = new LocalDRPC();
+     LocalCluster cluster = new LocalCluster();
+     LocalTopology topo = cluster.submitTopology("drpc-demo", conf, builder.createLocalTopology(drpc))) {
+
+    System.out.println("Results for 'hello':" + drpc.execute("exclamation", "hello"));
+}
+```
+
+First you create a `LocalDRPC` object. This object simulates a DRPC server in process, just like how `LocalCluster` simulates a Storm cluster in process. Then you create the `LocalCluster` to run the topology in local mode. `LinearDRPCTopologyBuilder` has separate methods for creating local topologies and remote topologies. In local mode the `LocalDRPC` object does not bind to any ports, so the topology needs to know about the object to communicate with it. This is why `createLocalTopology` takes in the `LocalDRPC` object as input.
+
+After launching the topology, you can do DRPC invocations using the `execute` method on `LocalDRPC`.
+
+Because all of the objects used are instances of AutoCloseable, when the try block's scope ends the topology is killed, the cluster is shut down, and the DRPC server also shuts down.
+
+### Clojure API
+
+Storm also offers a Clojure API for testing.
+
+[This blog post](http://www.pixelmachine.org/2011/12/21/Testing-Storm-Topologies-Part-2.html) talks about this, but is a little out of date. To get this functionality you need to include the `storm-clojure-test` dependency. This will pull in a lot of Storm itself, which should not be packaged with your topology, so please make sure it is a test dependency only.
+
+### Debugging your topology with an IDE
+
+One of the great use cases for local mode is to be able to walk through the code execution of your bolts and spouts using an IDE. You can do this on the command line by adding the `--java-debug` option followed by the parameter you would pass to jdwp. This makes it simple to launch the local cluster with `-agentlib:jdwp=` turned on.
+
+When running from within an IDE itself you can modify your code to run within a call to `LocalCluster.withLocalModeOverride`
+
+```java
+public static void main(final String args[]) {
+    LocalCluster.withLocalModeOverride(() -> originalMain(args), 10);
+}
+```
+
+Or you could also modify the IDE to run "org.apache.storm.LocalCluster" instead of your main class when launching, and pass in the name of the class as an argument to it. This will also trigger local mode, and is what `storm local` does behind the scenes.
+
+### Common configurations for local mode
+
+You can see a full list of configurations [here](javadocs/org/apache/storm/Config.html).
+
+1. **Config.TOPOLOGY_MAX_TASK_PARALLELISM**: This config puts a ceiling on the number of threads spawned for a single component.
Oftentimes production topologies have a lot of parallelism (hundreds of threads), which places an unreasonable load on the local machine when trying to test the topology in local mode. This config lets you easily control that parallelism.
+2. **Config.TOPOLOGY_DEBUG**: When this is set to true, Storm will log a message every time a tuple is emitted from any spout or bolt. This is extremely useful for debugging.
+
+These, like all other configs, can be set on the command line when launching your topology with the `-c` flag. The flag is of the form `-c <conf>=<value>`, so to enable debugging when launching your topology in local mode you could run
+
+```
+storm local topology.jar -c topology.debug=true
+```
diff --git a/docs/LocalityAwareness.md b/docs/LocalityAwareness.md
new file mode 100644
index 00000000000..517f1c7d961
--- /dev/null
+++ b/docs/LocalityAwareness.md
@@ -0,0 +1,84 @@
+---
+title: Locality Awareness In LoadAwareShuffleGrouping
+layout: documentation
+documentation: true
+---
+
+# Locality Awareness In LoadAwareShuffleGrouping
+
+### Motivation
+
+Apache Storm has introduced locality awareness to LoadAwareShuffleGrouping based on Bang-Bang control theory.
+It aims to keep traffic to closer downstream executors to avoid network latency when those executors are not under heavy load.
+It can also avoid serialization/deserialization overhead if the traffic happens in the same worker.
+
+### How it works
+
+An executor (say `E`) which has a LoadAwareShuffleGrouping to downstream executors views them in four `scopes` based on their locations relative to the executor `E` itself.
+The four scopes are:
+
+* `WORKER_LOCAL`: every downstream executor located on the same worker as this executor `E`
+* `HOST_LOCAL`: every downstream executor located on the same host as this executor `E`
+* `RACK_LOCAL`: every downstream executor located on the same rack as this executor `E`
+* `EVERYTHING`: every downstream executor of the executor `E`
+
+It starts by sending tuples to the downstream executors in the scope of `WORKER_LOCAL`.
+The downstream executors in the scope are chosen based on their load. Executors with lower load are more likely to be chosen.
+Once the average load of these `WORKER_LOCAL` executors reaches `topology.localityaware.higher.bound`,
+it switches to the next higher scope, which is `HOST_LOCAL`, and starts sending tuples in that scope.
+If the average load is still higher than the `higher bound`, it switches to an even higher scope.
+
+On the other hand, it switches to a lower scope if the average load of the lower scope is less than `topology.localityaware.lower.bound`.
+
+
+### How is Load calculated
+
+The load of a downstream executor is the maximum of the following two:
+
+* The population percentage of the receive queue
+* Math.min(pendingMessages, 1024) / 1024.
+
+`pendingMessages`: The upstream executor `E` sends messages to the downstream executor through Netty, and `pendingMessages` is the number of messages that haven't got through to the server.
+
+If the downstream executor is located on the same worker as the executor `E`, the load of that downstream executor is:
+* The population percentage of the receive queue
+
+### Relationship between Load and Capacity
+
+The capacity of a bolt executor on Storm UI is calculated as:
+ * (number executed * average execute latency) / measurement time
+
+It basically means how busy this executor is. If this is around 1.0, the corresponding Bolt is running as fast as it can. A `__capacity` metric exists to track this value for each executor.
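+
+To make the load and capacity formulas above concrete, here is a minimal Java sketch of the math (the class and method names are illustrative, not Storm's actual implementation):
+
+```java
+public final class LoadAndCapacityMath {
+
+    // Load of a downstream executor on a remote worker: the maximum of the
+    // receive queue population percentage and the normalized pending message
+    // count, which is capped at 1024 messages.
+    public static double load(double receiveQueuePctFull, int pendingMessages) {
+        double pendingLoad = Math.min(pendingMessages, 1024) / 1024.0;
+        return Math.max(receiveQueuePctFull, pendingLoad);
+    }
+
+    // Capacity of a bolt executor, as shown on Storm UI, for one measurement window.
+    public static double capacity(long executed, double avgExecuteLatencyMs, long windowMs) {
+        return (executed * avgExecuteLatencyMs) / windowMs;
+    }
+
+    public static void main(String[] args) {
+        // 10,000 tuples executed at 0.8 ms average over a 10 second window -> 0.8
+        System.out.println(capacity(10_000, 0.8, 10_000));
+        // a nearly empty receive queue but a large Netty backlog -> load of 1.0
+        System.out.println(load(0.05, 2048));
+    }
+}
+```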
+
+The `Capacity` does not directly correspond to the `Load`:
+
+* If the `Load` of the executor `E1` is high,
+    * the `Capacity` of `E1` could be high: the population of the receive queue of `E1` could be high, which means the executor `E1` has more work to do.
+    * the `Capacity` could also be low: `pendingMessages` could be high because other executors share the netty connection between the two workers and they are sending too many messages.
+    But the actual population of the receive queue of `E1` might be low.
+* If the `Load` is low,
+    * the `Capacity` could be low: lower `Load` means less work to do.
+    * the `Capacity` could also be high: the executor could be receiving tuples and executing tuples at a similar average rate.
+* If the `Capacity` is high,
+    * the `Load` could be high: high `Capacity` means the executor is busy. It could be because it's receiving too many tuples.
+    * the `Load` could also be low: the executor could be receiving tuples and executing tuples at a similar average rate.
+* If the `Capacity` is low,
+    * the `Load` could be low: if the `pendingMessages` count is low
+    * the `Load` could also be high: the `pendingMessages` count might be very high.
+
+
+### Troubleshooting
+
+#### I am seeing high capacity (close to 1.0) on some executors and low capacity (close to 0) on other executors
+
+1. It could mean that you can reduce parallelism. Your executors are able to keep up and the load never gets to a very high point.
+
+2. You can try to adjust `topology.localityaware.higher.bound` and `topology.localityaware.lower.bound`.
+
+3. You can try to enable `topology.ras.order.executors.by.proximity.needs`. With this config, unassigned executors will be sorted by topological order
+with network proximity needs before being scheduled. This is a best-effort attempt to split the topology into slices and allocate the executors in each slice as physically close together as possible.
+
+
+#### I just want the capacity on every downstream executor to be even
+
+You can turn off LoadAwareShuffleGrouping by setting `topology.disable.loadaware.messaging` to `true`.
diff --git a/docs/Logs.md b/docs/Logs.md
new file mode 100644
index 00000000000..46cc922c9ec
--- /dev/null
+++ b/docs/Logs.md
@@ -0,0 +1,30 @@
+---
+title: Storm Logs
+layout: documentation
+documentation: true
+---
+Logs in Storm are essential for tracking the status, operations, error messages and debug information for all the
+daemons (e.g., nimbus, supervisor, logviewer, drpc, ui, pacemaker) and topologies' workers.
+
+### Location of the Logs
+All the daemon logs are placed under the ${storm.log.dir} directory, which an administrator can set in the System properties or
+in the cluster configuration. By default, ${storm.log.dir} points to ${storm.home}/logs.
+
+All the worker logs are placed under the workers-artifacts directory in a hierarchical manner, e.g.,
+${workers-artifacts}/${topologyId}/${port}/worker.log. Users can set the workers-artifacts directory
+by configuring the variable "storm.workers.artifacts.dir". By default, the workers-artifacts directory
+is located at ${storm.log.dir}/logs/workers-artifacts.
+
+### Using the Storm UI for Log View/Download and Log Search
+Authorized users can view and download daemon and worker logs through the Storm UI.
+
+To improve the debugging of Storm, we provide the Log Search feature.
+
+Log Search supports searching in a certain log file or in all of a topology's log files:
+
+String search in a log file: In the log page for a worker, a user can search for a certain string, e.g., "Exception", in a certain worker log. This search can happen for both normal text logs and rolled zip log files. In the results, the offset and matched lines will be displayed.
+
+![Search in a log](images/search-for-a-single-worker-log.png "Search in a log")
+
+Search in a topology: a user can also search for a string across a certain topology by clicking the magnifying glass icon at the top right corner of the UI page. The UI will search on all the supervisor nodes in a distributed way to find the matched string in all logs for this topology. The search can happen for either normal text log files or rolled zip log files by checking/unchecking the "Search archived logs:" box. The matched results are shown in the UI with URL links directing the user to the corresponding logs on each supervisor node. This powerful feature is very helpful for finding problematic supervisor nodes running this topology.
+
+![Search in a topology](images/search-a-topology.png "Search in a topology")
diff --git a/docs/Maven.md b/docs/Maven.md
new file mode 100644
index 00000000000..47fd1b1acc1
--- /dev/null
+++ b/docs/Maven.md
@@ -0,0 +1,22 @@
+---
+title: Maven
+layout: documentation
+documentation: true
+---
+To develop topologies, you'll need the Storm jars on your classpath. You should either include the unpacked jars in the classpath for your project or use Maven to include Storm as a development dependency. Storm is hosted on Maven Central. To include Storm in your project as a development dependency, add the following to your pom.xml:
+
+
+```xml
+<dependency>
+  <groupId>org.apache.storm</groupId>
+  <artifactId>storm-client</artifactId>
+  <version>{{page.version}}</version>
+  <scope>provided</scope>
+</dependency>
+```
+
+[Here's an example]({{page.git-blob-base}}/examples/storm-starter/pom.xml) of a pom.xml for a Storm project.
+
+### Developing Storm
+
+Please refer to [DEVELOPER.md]({{page.git-blob-base}}/DEVELOPER.md) for more details.
diff --git a/docs/Message-passing-implementation.md b/docs/Message-passing-implementation.md
new file mode 100644
index 00000000000..fd4cf2cbeea
--- /dev/null
+++ b/docs/Message-passing-implementation.md
@@ -0,0 +1,30 @@
+---
+title: Message Passing Implementation
+layout: documentation
+documentation: true
+---
+(Note: this walkthrough is out of date as of 0.8.0. 0.8.0 revamped the message passing infrastructure to be based on the Disruptor)
+
+This page walks through how emitting and transferring tuples works in Storm.
+
+- Worker is responsible for message transfer
+    - `refresh-connections` is called every "task.refresh.poll.secs" or whenever the assignment in ZK changes. It manages connections to other workers and maintains a mapping from task -> worker [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/worker.clj#L123)
+    - Provides a "transfer function" that is used by tasks to send tuples to other tasks. The transfer function takes in a task id and a tuple, and it serializes the tuple and puts it onto a "transfer queue". There is a single transfer queue for each worker.
[code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/worker.clj#L56)
+    - The serializer is thread-safe [code](https://github.com/apache/storm/blob/0.7.1/src/jvm/org/apache/storm/serialization/KryoTupleSerializer.java#L26)
+    - The worker has a single thread which drains the transfer queue and sends the messages to other workers [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/worker.clj#L185)
+    - Message sending happens through this protocol: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/messaging/protocol.clj)
+    - The implementation for distributed mode uses ZeroMQ [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/messaging/zmq.clj)
+    - The implementation for local mode uses in-memory Java queues (so that it's easy to use Storm locally without needing to get ZeroMQ installed) [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/messaging/local.clj)
+- Receiving messages in tasks works differently in local mode and distributed mode
+    - In local mode, the tuple is sent directly to an in-memory queue for the receiving task [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/messaging/local.clj#L21)
+    - In distributed mode, each worker listens on a single TCP port for incoming messages and then routes those messages in-memory to tasks. The TCP port is called a "virtual port", because it receives [task id, message] and then routes it to the actual task. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/worker.clj#L204)
+        - The virtual port implementation is here: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/zilch/virtual_port.clj)
+        - Tasks listen on an in-memory ZeroMQ port for messages from the virtual port [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/task.clj#L201)
+            - Bolts listen here: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/task.clj#L489)
+            - Spouts listen here: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/task.clj#L382)
+- Tasks are responsible for message routing. A tuple is emitted either to a direct stream (where the task id is specified) or a regular stream. In direct streams, the message is only sent if that bolt subscribes to that direct stream. In regular streams, the stream grouping functions are used to determine the task ids to send the tuple to.
+    - Tasks have a routing map from {stream id} -> {component id} -> {stream grouping function} [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/task.clj#L198)
+    - The "tasks-fn" returns the task ids to send the tuples to for either regular stream emit or direct stream emit [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/task.clj#L207)
+    - After getting the output task ids, bolts and spouts use the transfer-fn provided by the worker to actually transfer the tuples
+        - Bolt transfer code here: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/task.clj#L429)
+        - Spout transfer code here: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/org/apache/storm/daemon/task.clj#L329)
diff --git a/docs/Metrics.md b/docs/Metrics.md
new file mode 100644
index 00000000000..4a2db2728b7
--- /dev/null
+++ b/docs/Metrics.md
@@ -0,0 +1,313 @@
+---
+title: Storm Metrics
+layout: documentation
+documentation: true
+---
+Storm exposes a metrics interface to report summary statistics across the full topology.
+The numbers you see on the UI come from some of these built-in metrics, but are reported through the worker heartbeats instead of through the IMetricsConsumer described below.
+
+If you are looking for cluster-wide monitoring please see [Cluster Metrics](ClusterMetrics.html).
+
+### Metric Types
+
+Metrics have to implement [`IMetric`]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/metric/api/IMetric.java) which contains just one method, `getValueAndReset` -- do any remaining work to find the summary value, and reset back to an initial state. For example, the MeanReducer divides the running total by its running count to find the mean, then initializes both values back to zero.
+
+Storm gives you these metric types:
+
+* [AssignableMetric]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/metric/api/AssignableMetric.java) -- set the metric to the explicit value you supply. Useful if it's an external value or in the case that you are already calculating the summary statistic yourself.
+* [CombinedMetric]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/metric/api/CombinedMetric.java) -- generic interface for metrics that can be updated associatively.
+* [CountMetric]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/metric/api/CountMetric.java) -- a running total of the supplied values. Call `incr()` to increment by one, `incrBy(n)` to add/subtract the given number.
+    - [MultiCountMetric]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/metric/api/MultiCountMetric.java) -- a hashmap of count metrics.
+* [ReducedMetric]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/metric/api/ReducedMetric.java)
+    - [MeanReducer]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/metric/api/MeanReducer.java) -- track a running average of values given to its `reduce()` method. (It accepts `Double`, `Integer` or `Long` values, and maintains the internal average as a `Double`.) Despite his reputation, the MeanReducer is actually a pretty nice guy in person.
+    - [MultiReducedMetric]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/metric/api/MultiReducedMetric.java) -- a hashmap of reduced metrics.
+
+Be aware that even though `getValueAndReset` can return any object, returning arbitrary objects makes it very difficult for an `IMetricsConsumer` to know how to translate them into something usable.
Also note that because metrics are sent to the `IMetricsConsumer` as part of a tuple, the values returned need to be able to be [serialized](Serialization.html) by your topology.
+
+### Metrics Consumer
+
+You can listen for and handle topology metrics by registering a Metrics Consumer with your topology.
+
+To register a metrics consumer with your topology, add it to your topology's configuration like this:
+
+```java
+conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class, 1);
+```
+
+You can refer to [Config#registerMetricsConsumer](javadocs/org/apache/storm/Config.html#registerMetricsConsumer-java.lang.Class-) and its overloaded methods in the javadoc.
+
+Otherwise edit the storm.yaml config file:
+
+```yaml
+topology.metrics.consumer.register:
+  - class: "org.apache.storm.metric.LoggingMetricsConsumer"
+    parallelism.hint: 1
+  - class: "org.apache.storm.metric.HttpForwardingMetricsConsumer"
+    parallelism.hint: 1
+    argument: "http://example.com:8080/metrics/my-topology/"
+```
+
+Storm adds a MetricsConsumerBolt to your topology for each class in the `topology.metrics.consumer.register` list. Each MetricsConsumerBolt subscribes to receive metrics from all tasks in the topology. The parallelism for each Bolt is set to `parallelism.hint` and the `component id` for that Bolt is set to `__metrics_<class name>`. If you register the same class name more than once, the postfix `#<sequence number>` is appended to the component id.
+
+Storm provides some built-in metrics consumers for you to try out to see which metrics are provided in your topology.
+
+* [`LoggingMetricsConsumer`]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/metric/LoggingMetricsConsumer.java) -- listens for all metrics and dumps them to a log file as TSV (Tab Separated Values).
+* [`HttpForwardingMetricsConsumer`]({{page.git-blob-base}}/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/HttpForwardingMetricsConsumer.java) -- listens for all metrics and POSTs them serialized to a configured URL via HTTP. Storm also provides [`HttpForwardingMetricsServer`]({{page.git-blob-base}}/storm-core/src/jvm/org/apache/storm/metric/HttpForwardingMetricsServer.java) as an abstract class, so you can extend this class, run it as an HTTP server, and handle metrics sent by HttpForwardingMetricsConsumer.
+
+Also, Storm exposes the interface [`IMetricsConsumer`]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/metric/api/IMetricsConsumer.java) for implementing Metrics Consumers, so you can create custom metrics consumers and attach them to your topologies, or use other implementations of Metrics Consumers provided by the Storm community. Some examples are [verisign/storm-graphite](https://github.com/verisign/storm-graphite) and [storm-metrics-statsd](https://github.com/endgameinc/storm-metrics-statsd).
+
+When you implement your own metrics consumer, the `argument` is passed in as an Object when [IMetricsConsumer#prepare](javadocs/org/apache/storm/metric/api/IMetricsConsumer.html#prepare-java.util.Map-java.lang.Object-org.apache.storm.task.TopologyContext-org.apache.storm.task.IErrorReporter-) is called, so you need to infer the Java type of the value configured in yaml and cast it explicitly.
+
+Please keep in mind that MetricsConsumerBolt is just a kind of Bolt, so the throughput of the whole topology will go down when registered metrics consumers cannot keep up with the incoming metrics; take care of these Bolts as you would normal Bolts. One idea to avoid this is to make your Metrics Consumer implementation non-blocking.
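+
+For reference, a minimal consumer might look like the following sketch. The class name and body are illustrative, not one of Storm's built-in consumers; only the `IMetricsConsumer` interface and its callbacks come from Storm, and the exact `prepare` signature varies slightly between Storm versions:
+
+```java
+import java.util.Collection;
+import java.util.Map;
+
+import org.apache.storm.metric.api.IMetricsConsumer;
+import org.apache.storm.task.IErrorReporter;
+import org.apache.storm.task.TopologyContext;
+
+public class PrintingMetricsConsumer implements IMetricsConsumer {
+
+    @Override
+    public void prepare(Map<String, Object> topoConf, Object registrationArgument,
+                        TopologyContext context, IErrorReporter errorReporter) {
+        // registrationArgument holds whatever was configured under `argument` in
+        // storm.yaml; cast it to the type you expect, e.g. (String) registrationArgument.
+    }
+
+    @Override
+    public void handleDataPoints(TaskInfo taskInfo, Collection<DataPoint> dataPoints) {
+        // Keep this fast (or hand work off to another thread) so the
+        // MetricsConsumerBolt does not become a bottleneck for the topology.
+        for (DataPoint p : dataPoints) {
+            System.out.println(taskInfo.srcComponentId + ":" + taskInfo.srcTaskId
+                    + " " + p.name + "=" + p.value);
+        }
+    }
+
+    @Override
+    public void cleanup() {
+        // release any resources opened in prepare()
+    }
+}
+```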
+
+
+### Build your own metric (task level)
+
+You can measure your own metric by registering an `IMetric` with the metric registry.
+
+Suppose we would like to measure the execution count of Bolt#execute. Let's start by defining a metric instance. CountMetric seems to fit our use case.
+
+```java
+private transient CountMetric countMetric;
+```
+
+Notice we define it as transient. IMetric is not Serializable, so we declare the field transient to avoid serialization issues.
+
+Next, let's initialize and register the metric instance.
+
+```java
+@Override
+public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+    // other initialization here.
+    countMetric = new CountMetric();
+    context.registerMetric("execute_count", countMetric, 60);
+}
+```
+
+The meanings of the first and second parameters are straightforward: the metric name and the IMetric instance. The third parameter of [TopologyContext#registerMetric](javadocs/org/apache/storm/task/TopologyContext.html#registerMetric-java.lang.String-T-int-) is the period (seconds) at which to publish and reset the metric.
+
+Lastly, let's increment the value when Bolt.execute() is executed.
+
+```java
+public void execute(Tuple input) {
+    countMetric.incr();
+    // handle tuple here.
+}
+```
+
+Note that the sample rate for topology metrics is not applied to custom metrics since we're calling incr() ourselves.
+
+Done! `countMetric.getValueAndReset()` is called every 60 seconds, the period we registered, and the pair ("execute_count", value) will be pushed to the MetricsConsumer.
+
+### Build your own metrics (worker level)
+
+You can register your own worker-level metrics by adding them to `Config.WORKER_METRICS` for all workers in the cluster, or `Config.TOPOLOGY_WORKER_METRICS` for all workers in a specific topology.
+
+For example, we can add `worker.metrics` to storm.yaml in the cluster,
+
+```yaml
+worker.metrics:
+  metricA: "aaa.bbb.ccc.ddd.MetricA"
+  metricB: "aaa.bbb.ccc.ddd.MetricB"
+  ...
+```
+
+or put a `Map<String, String>` (metric name -> metric class name) with the key `Config.TOPOLOGY_WORKER_METRICS` into the config map.
+
+There are some restrictions for worker level metric instances:
+
+A) Worker-level metrics should be a kind of gauge, since they are initialized and registered from the SystemBolt and not exposed to user tasks.
+
+B) Metrics will be initialized with the default constructor, and no injection of configuration or objects will be performed.
+
+C) The bucket size (seconds) for metrics is fixed to `Config.TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS`.
+
+### Builtin Metrics
+
+The [builtin metrics]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/daemon/metrics/BuiltinMetricsUtil.java) instrument Storm itself.
+
+[BuiltinMetricsUtil.java]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/daemon/metrics/BuiltinMetricsUtil.java) sets up data structures for the built-in metrics, and facade methods that the other framework components can use to update them. The metrics themselves are calculated in the calling code -- see for example [`ackSpoutMsg`]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/executor/Executor.java).
+
+#### Reporting Rate
+
+The rate at which built-in metrics are reported is configurable through the `topology.builtin.metrics.bucket.size.secs` config. If you set this too low it can overload the consumers,
+so please use caution when modifying it.
+
+#### Tuple Counting Metrics
+
+There are several different metrics related to counting what a bolt or spout does to a tuple.
These include things like emitting, transferring, acking, and failing of tuples.
+
+In general all of these tuple count metrics are randomly sub-sampled unless otherwise stated. This means that the counts you see both on the UI and from the built-in metrics are not necessarily exact. In fact by default we sample only 5% of the events and estimate the total number of events from that. The sampling percentage is configurable per topology through the `topology.stats.sample.rate` config. Setting it to 1.0 will make the counts exact, but be aware that the more events we sample the slower your topology will run (as the metrics are counted in the same code path as tuples are processed). This is why we have a 5% sample rate as the default.
+
+The tuple counting metric names contain `"${stream_name}"` or `"${upstream_component}:${stream_name}"`. The former is used for all spout metrics and for outgoing bolt metrics (`__emit-count` and `__transfer-count`). The latter is used for bolt metrics that deal with incoming tuples.
+
+So for a word count topology the count bolt might show something like the following for the `__ack-count` metric
+
+```
+    "__ack-count-split:default": 80080
+```
+
+But the spout instead would show something like the following for the `__ack-count` metric.
+
+```
+    "__ack-count-default": 12500
+```
+
+
+##### `__ack-count`
+
+For bolts it is the number of incoming tuples that had the `ack` method called on them. For spouts it is the number of tuple trees that were fully acked. See [Guaranteeing Message Processing](Guaranteeing-message-processing.html) for more information about what a tuple tree is. If acking is disabled this metric is still reported, but it is not really meaningful.
+
+##### `__fail-count`
+
+For bolts this is the number of incoming tuples that had the `fail` method called on them. For spouts this is the number of tuple trees that failed. Tuple trees may fail from timing out or because a bolt called fail on it. The two are not separated out by this metric.
+
+##### `__emit-count`
+
+This is the total number of times the `emit` method was called to send a tuple. This is the same for both bolts and spouts.
+
+##### `__transfer-count`
+
+This is the total number of tuples transferred to a downstream bolt/spout for processing. This number will not always match `__emit-count`. If nothing is registered to receive a tuple downstream the number will be 0 even if tuples were emitted. Similarly if there are multiple downstream consumers it may be a multiple of the number emitted. The grouping also can play a role if it sends the tuple to multiple instances of a single bolt downstream.
+
+##### `__execute-count`
+
+This count metric is bolt specific. It counts the number of times that a bolt's `execute` method was called.
+
+#### Tuple Latency Metrics
+
+Similar to the tuple counting metrics, Storm also collects average latency metrics for bolts and spouts. These follow the same structure as the bolt/spout maps and are sub-sampled in the same way as well. In all cases the latency is measured in milliseconds.
+
+##### `__complete-latency`
+
+The complete latency is just for spouts. It is the average amount of time it took for `ack` or `fail` to be called for a tuple after it was emitted. If acking is disabled this metric is likely to be blank or 0 for all values, and should be ignored.
+
+##### `__execute-latency`
+
+This is just for bolts. It is the average amount of time that the bolt spent in the call to the `execute` method.
The higher this gets, the lower the throughput of tuples per bolt instance.
+
+##### `__process-latency`
+
+This is also just for bolts. It is the average amount of time between when `execute` was called to start processing a tuple, to when it was acked or failed by the bolt. If your bolt is a very simple bolt and the processing is synchronous then `__process-latency` and `__execute-latency` should be very close to one another, with process latency being slightly smaller. If you are doing a join or have asynchronous processing then it may take a while for a tuple to be acked so the process latency would be higher than the execute latency.
+
+##### `__skipped-max-spout-ms`
+
+This metric records how much time a spout was idle because more tuples than `topology.max.spout.pending` were still outstanding. This is the total time in milliseconds, not the average amount of time, and is not sub-sampled.
+
+
+##### `__skipped-backpressure-ms`
+
+This metric records how much time a spout was idle because back-pressure indicated that downstream queues in the topology were too full. This is the total time in milliseconds, not the average amount of time, and is not sub-sampled. This is similar to skipped-throttle-ms in Storm 1.x.
+
+##### `__backpressure-last-overflow-count`
+
+This metric indicates the overflow count the last time backpressure status was sent, with a minimum value of 1 if a task has backpressure on.
+
+##### `__skipped-inactive-ms`
+
+This metric records how much time a spout was idle because the topology was deactivated. This is the total time in milliseconds, not the average amount of time, and is not sub-sampled.
+
+#### Error Reporting Metrics
+
+Storm also collects error reporting metrics for bolts and spouts.
+
+##### `__reported-error-count`
+
+This metric records how many errors were reported by a spout/bolt. It is the total number of times the `reportError` method was called.
+
+#### Queue Metrics
+
+Each bolt or spout instance in a topology has a receive queue. Each worker also has a worker transfer queue for sending messages to other workers. All of these have metrics that are reported.
+
+The receive queue metrics are reported under the `receive_queue` name. The metrics for the queue that sends messages to other workers are under the `worker-transfer-queue` metric name for the system bolt (`__system`).
+
+These queues report the following metrics:
+
+```
+{
+    "arrival_rate_secs": 1229.1195171893523,
+    "overflow": 0,
+    "sojourn_time_ms": 2.440771591407277,
+    "capacity": 1024,
+    "population": 19,
+    "pct_full": "0.018",
+    "insert_failures": "0",
+    "dropped_messages": "0"
+}
+```
+
+`arrival_rate_secs` is an estimation of the number of tuples that are inserted into the queue in one second, although it is actually the dequeue rate.
+The `sojourn_time_ms` is calculated from the arrival rate and is an estimate of how many milliseconds each tuple sits in the queue before it is processed.
+
+The queue has a set maximum number of entries. If the regular queue fills up, an overflow queue takes over. The number of tuples stored in this overflow section is represented by the `overflow` metric. Note that an overflow queue is only used for executors to receive tuples from remote workers. It doesn't apply to intra-worker tuple transfer.
+
+`capacity` is the maximum number of entries in the queue. `population` is the number of entries currently filled in the queue. `pct_full` tracks the percentage of capacity in use.
+
+`insert_failures` tracks the number of failures inserting into the queue.
`dropped_messages` tracks messages dropped due to the overflow queue being full.
+
+#### System Bolt (Worker) Metrics
+
+The System Bolt `__system` provides lots of metrics for different worker-wide things. The one metric not described here is the `__transfer` queue metric, because it fits with the other disruptor metrics described above.
+
+Be aware that the `__system` bolt is an actual bolt, so the regular bolt metrics described above will also be reported for it.
+
+##### Receive (NettyServer)
+`__recv-iconnection` reports stats for the Netty server on the worker. This is what gets messages from other workers. It is of the form
+
+```
+{
+    "dequeuedMessages": 0,
+    "enqueued": {
+      "/127.0.0.1:49952": 389951
+    }
+}
+```
+
+`dequeuedMessages` is a throwback to older code where there was an internal queue between the server and the bolts/spouts. That is no longer the case and the value can be ignored.
+`enqueued` is a map between the address of the remote worker and the number of tuples that were sent from it to this worker.
+
+##### Send (Netty Client)
+
+The `__send-iconnection` metrics report information about all of the clients for this worker. They are named `__send-iconnection-METRIC_TYPE-HOST:PORT` for a given Client that is
+connected to a worker with the given host/port. These metrics can be disabled by setting `topology.enable.send.iconnection.metrics` to false.
+
+The metric types reported for each client are:
+
+ * `reconnects` the number of reconnections that have happened.
+ * `pending` the number of messages that have not been sent. (This corresponds to messages, not tuples.)
+ * `sent` the number of messages that have been sent. (This is messages, not tuples.)
+ * `lostOnSend` the number of messages that were lost because of connection issues. (This is messages, not tuples.)
+
+##### JVM Memory
+
+JVM memory usage is reported through `memory.non-heap` for off-heap memory, `memory.heap` for on-heap memory and `memory.total` for combined values. These values come from the [MemoryUsage](https://docs.oracle.com/javase/8/docs/api/index.html?java/lang/management/MemoryUsage.html) MXBean. Each of these metrics is reported as a map with the following keys, and values returned by the corresponding java code.
+
+| Key | Corresponding Code |
+|--------|--------------------|
+| `max` | `memUsage.getMax()` |
+| `committed` | `memUsage.getCommitted()` |
+| `init` | `memUsage.getInit()` |
+| `used` | `memUsage.getUsed()` |
+| `usage` | `Ratio.of(memUsage.getUsed(), memUsage.getMax())` |
+
+##### JVM Garbage Collection
+
+The exact GC metric name depends on the garbage collector that your worker uses. The data is all collected from `ManagementFactory.getGarbageCollectorMXBeans()` and the name of the metrics is `"GC"` followed by the name of the returned bean with whitespace removed. The reported metrics are just
+
+* `count` the number of GC events that happened and
+* `time` the total number of milliseconds that were spent doing GC.
+
+Please refer to the [JVM documentation](https://docs.oracle.com/javase/8/docs/api/java/lang/management/ManagementFactory.html#getGarbageCollectorMXBeans--) for more details.
+
+##### JVM Misc
+
+* There are metrics prefixed with `threads` providing the number of threads, daemon threads, blocked and deadlocked threads.
+
+##### Other worker metrics
+
+* `doHeartbeat-calls` is a meter that indicates the rate at which the worker is performing heartbeats.
+* `newWorkerEvent` is 1 when a worker is first started and 0 all other times.
This can be used to tell when a worker has crashed and is restarted.
+* `startTimeSecs` is when the worker started in seconds since the epoch
+* `uptimeSecs` reports the number of seconds the worker has been up for
+* `workerCpuUsage` reports the CPU usage of the worker as a percentage of cores. 1.0 indicates 1 CPU core.
+
diff --git a/docs/Multilang-protocol.md b/docs/Multilang-protocol.md
new file mode 100644
index 00000000000..77cb9314cbd
--- /dev/null
+++ b/docs/Multilang-protocol.md
@@ -0,0 +1,337 @@
+---
+title: Multi-Lang Protocol
+layout: documentation
+documentation: true
+---
+This page explains the multilang protocol as of Storm 0.7.1. Versions prior to 0.7.1 used a somewhat different protocol, documented [here](Storm-multi-language-protocol-(versions-0.7.0-and-below\).html).
+
+# Storm Multi-Language Protocol
+
+## Supported Languages
+
+Storm Multi-Language has implementations in the following languages:
+
+- [JavaScript](https://github.com/apache/storm/tree/master/storm-multilang/javascript)
+- [Python](https://github.com/apache/storm/tree/master/storm-multilang/python)
+- [Ruby](https://github.com/apache/storm/tree/master/storm-multilang/ruby)
+
+Third-party libraries are available for the following languages:
+
+- [c# (on .net core 2.0)](https://github.com/Azure/net-storm-multilang-adapter)
+
+## Shell Components
+
+Support for multiple languages is implemented via the ShellBolt,
+ShellSpout, and ShellProcess classes. These classes implement the
+IBolt and ISpout interfaces and the protocol for executing a script or
+program via the shell using Java's ProcessBuilder class.
+
+### Packaging of shell scripts
+
+By default the ShellProcess assumes that your code is packaged inside your topology jar under the resources subdirectory, and it will change the current working directory of
+the executable process to be that resources directory extracted from the jar.
+A jar file does not store permissions of the files in it. This includes the execute bit that would allow a shell script to be loaded and run by the operating system.
+As such, in most examples the scripts are of the form `python3 mybolt.py`, because the Python executable is already on the supervisor and mybolt is packaged in the resources directory of the jar.
+
+If you want to package something more complicated, like a new version of Python itself, you instead need to use the blob store and a `.tgz` archive, which does support permissions.
+
+See the docs on the [Blob Store](distcache-blobstore.html) for more details on how to ship a jar.
+
+To make a ShellBolt/ShellSpout work with executables and scripts shipped in the blob store dist cache, add
+
+```
+changeChildCWD(false);
+```
+
+in the constructor of your ShellBolt/ShellSpout. The shell command will then be relative to the cwd of the worker, where the symlinks to the resources are.
+
+So if I shipped Python with a symlink named `newPython` and a Python ShellSpout shipped as `shell_spout.py`, I would have something like
+
+```
+public MyShellSpout() {
+    super("./newPython/bin/python3", "./shell_spout.py");
+    changeChildCWD(false);
+}
+```
+
+## Output fields
+
+Output fields are part of the Thrift definition of the topology. This means that when you use multilang in Java, you need to create a bolt that extends ShellBolt, implements IRichBolt, and declares the fields in `declareOutputFields` (similarly for ShellSpout), as in the sketch below.
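+
+A minimal sketch of such a bolt (the script name `splitsentence.py` is an assumption; it would live in the jar's `resources/` directory):
+
+```java
+import java.util.Map;
+
+import org.apache.storm.task.ShellBolt;
+import org.apache.storm.topology.IRichBolt;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+
+public class SplitSentence extends ShellBolt implements IRichBolt {
+
+    public SplitSentence() {
+        // run the multilang script that does the actual splitting
+        super("python3", "splitsentence.py");
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        // output fields must be declared on the JVM side
+        declarer.declare(new Fields("word"));
+    }
+
+    @Override
+    public Map<String, Object> getComponentConfiguration() {
+        return null;
+    }
+}
+```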
+
+You can learn more about this in [Concepts](Concepts.html)
+
+## Protocol Preamble
+
+A simple protocol is implemented via the STDIN and STDOUT of the
+executed script or program. All data exchanged with the process is
+encoded in JSON, making support possible for pretty much any language.
+
+# Packaging Your Stuff
+
+To run a shell component on a cluster, the scripts that are shelled
+out to must be in the `resources/` directory within the jar submitted
+to the master.
+
+However, during development or testing on a local machine, the resources
+directory just needs to be on the classpath.
+
+## The Protocol
+
+Notes:
+
+* Both ends of this protocol use a line-reading mechanism, so be sure to
+trim off newlines from the input and to append them to your output.
+* All JSON inputs and outputs are terminated by a single line containing "end". Note that this delimiter is not itself JSON encoded.
+* The bullet points below are written from the perspective of the script writer's
+STDIN and STDOUT.
+
+### Initial Handshake
+
+The initial handshake is the same for both types of shell components:
+
+* STDIN: Setup info. This is a JSON object with the Storm configuration, a PID directory, and a topology context, like this:
+
+```
+{
+    "conf": {
+        "topology.message.timeout.secs": 3,
+        // etc
+    },
+    "pidDir": "...",
+    "context": {
+        "task->component": {
+            "1": "example-spout",
+            "2": "__acker",
+            "3": "example-bolt1",
+            "4": "example-bolt2"
+        },
+        "taskid": 3,
+        // Everything below this line is only available in Storm 0.10.0+
+        "componentid": "example-bolt",
+        "stream->target->grouping": {
+            "default": {
+                "example-bolt2": {
+                    "type": "SHUFFLE"}}},
+        "streams": ["default"],
+        "stream->outputfields": {"default": ["word"]},
+        "source->stream->grouping": {
+            "example-spout": {
+                "default": {
+                    "type": "FIELDS",
+                    "fields": ["word"]
+                }
+            }
+        },
+        "source->stream->fields": {
+            "example-spout": {
+                "default": ["word"]
+            }
+        }
+    }
+}
+```
+
+Your script should create an empty file named with its PID in this directory. e.g.
+if the PID is 1234, an empty file named 1234 is created in the directory. This
+file lets the supervisor know the PID so it can shut down the process later on.
+
+As of Storm 0.10.0, the context sent by Storm to shell components has been
+enhanced substantially to include all aspects of the topology context available
+to JVM components. One key addition is the ability to determine a shell
+component's source and targets (i.e., inputs and outputs) in the topology via
+the `stream->target->grouping` and `source->stream->grouping` dictionaries. At
+the innermost level of these nested dictionaries, groupings are represented as
+a dictionary that minimally has a `type` key, but can also have a `fields` key
+to specify which fields are involved in a `FIELDS` grouping.
+
+* STDOUT: Your PID, in a JSON object, like `{"pid": 1234}`. The shell component will log the PID to its log.
+
+What happens next depends on the type of component:
+
+### Spouts
+
+Shell spouts are synchronous. The rest happens in a while(true) loop:
+
+* STDIN: Either a next, ack, activate, deactivate or fail command.
+
+"next" is the equivalent of ISpout's `nextTuple`.
+
+What happens next depends on the type of component:
+
+### Spouts
+
+Shell spouts are synchronous. The rest happens in a while(true) loop:
+
+* STDIN: Either a next, ack, activate, deactivate or fail command.
+
+"next" is the equivalent of ISpout's `nextTuple`. It looks like:
+
+```
+{"command": "next"}
+```
+
+"ack" looks like:
+
+```
+{"command": "ack", "id": "1231231"}
+```
+
+"activate" is the equivalent of ISpout's `activate`:
+```
+{"command": "activate"}
+```
+
+"deactivate" is the equivalent of ISpout's `deactivate`:
+```
+{"command": "deactivate"}
+```
+
+"fail" looks like:
+
+```
+{"command": "fail", "id": "1231231"}
+```
+
+* STDOUT: The results of your spout for the previous command. This can
+  be a sequence of emits and logs.
+
+An emit looks like:
+
+```
+{
+    "command": "emit",
+    // The id for the tuple. Leave this out for an unreliable emit. The id can
+    // be a string or a number.
+    "id": "1231231",
+    // The id of the stream this tuple was emitted to. Leave this empty to emit to the default stream.
+    "stream": "1",
+    // If doing an emit direct, indicate the task to send the tuple to
+    "task": 9,
+    // All the values in this tuple
+    "tuple": ["field1", 2, 3]
+}
+```
+
+If not doing an emit direct, you will immediately receive the task ids to which the tuple was emitted on STDIN as a JSON array.
+
+A "log" will log a message in the worker log. It looks like:
+
+```
+{
+    "command": "log",
+    // the message to log
+    "msg": "hello world!"
+}
+```
+
+* STDOUT: a "sync" command ends the sequence of emits and logs. It looks like:
+
+```
+{"command": "sync"}
+```
+
+After you sync, ShellSpout will not read your output until it sends another next, ack, or fail command.
+
+Note that, similarly to ISpout, all of the spouts in the worker will be locked up after a next, ack, or fail, until you sync. Also like ISpout, if you have no tuples to emit for a next, you should sleep for a small amount of time before syncing. ShellSpout will not automatically sleep for you.
+
+
+### Bolts
+
+The shell bolt protocol is asynchronous. You will receive tuples on STDIN as soon as they are available, and you may emit, ack, fail, and log at any time by writing to STDOUT, as follows:
+
+* STDIN: A tuple! This is a JSON encoded structure like this:
+
+```
+{
+    // The tuple's id - this is a string to support languages lacking 64-bit precision
+    "id": "-6955786537413359385",
+    // The id of the component that created this tuple
+    "comp": "1",
+    // The id of the stream this tuple was emitted to
+    "stream": "1",
+    // The id of the task that created this tuple
+    "task": 9,
+    // All the values in this tuple
+    "tuple": ["snow white and the seven dwarfs", "field2", 3]
+}
+```
+
+* STDOUT: An ack, fail, emit, or log. Emits look like:
+
+```
+{
+    "command": "emit",
+    // The ids of the tuples this output tuple should be anchored to
+    "anchors": ["1231231", "-234234234"],
+    // The id of the stream this tuple was emitted to. Leave this empty to emit to the default stream.
+    "stream": "1",
+    // If doing an emit direct, indicate the task to send the tuple to
+    "task": 9,
+    // All the values in this tuple
+    "tuple": ["field1", 2, 3]
+}
+```
+
+If not doing an emit direct, you will receive the task ids to which
+the tuple was emitted on STDIN as a JSON array. Note that, due to the
+asynchronous nature of the shell bolt protocol, when you read after
+emitting, you may not receive the task ids. You may instead read the
+task ids for a previous emit or a new tuple to process. You will
+receive the task id lists in the same order as their corresponding
+emits, however.
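+
+As an illustrative sketch, a bolt-side reader can keep these interleaved messages straight by treating emits as FIFO and pairing each incoming JSON array with the oldest pending emit (the class and method names here are assumptions, not part of the protocol):
+
+```java
+import java.util.ArrayDeque;
+import java.util.Deque;
+
+public class BoltReaderSketch {
+    // Task-id arrays arrive in emit order, so a FIFO queue pairs them up.
+    private final Deque<String> pendingEmits = new ArrayDeque<>();
+
+    // Call this after writing a non-direct emit to STDOUT.
+    public void onEmit(String emitJson) {
+        pendingEmits.addLast(emitJson);
+    }
+
+    // Call this for every "end"-terminated message read from STDIN.
+    public void onMessage(String json) {
+        if (json.trim().startsWith("[")) {
+            // A JSON array: the task ids for the oldest emit still waiting.
+            String emit = pendingEmits.removeFirst();
+            // ... record the task ids against `emit` here ...
+        } else {
+            // A JSON object: a new tuple (or heartbeat) to process.
+            // ... dispatch to the bolt's processing logic here ...
+        }
+    }
+}
+```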
+
+An ack looks like:
+
+```
+{
+    "command": "ack",
+    // the id of the tuple to ack
+    "id": "123123"
+}
+```
+
+A fail looks like:
+
+```
+{
+    "command": "fail",
+    // the id of the tuple to fail
+    "id": "123123"
+}
+```
+
+A "log" will log a message in the worker log. It looks like:
+
+```
+{
+    "command": "log",
+    // the message to log
+    "msg": "hello world!"
+}
+```
+
+* Note that, as of version 0.7.1, there is no longer any need for a
+  shell bolt to 'sync'.
+
+### Handling Heartbeats (0.9.3 and later)
+
+As of Storm 0.9.3, heartbeats have been added between ShellSpout/ShellBolt and their
+multi-lang subprocesses to detect hanging/zombie subprocesses. Any libraries
+for interfacing with Storm via multi-lang must take the following actions
+regarding heartbeats:
+
+#### Spout
+
+Shell spouts are synchronous, so subprocesses always send `sync` commands at the
+end of `next()`, so you should not have to do much to support heartbeats for
+spouts. That said, you must not let subprocesses sleep more than the worker
+timeout during `next()`.
+
+#### Bolt
+
+Shell bolts are asynchronous, so a ShellBolt will send heartbeat tuples to its
+subprocess periodically. A heartbeat tuple looks like:
+
+```
+{
+    "id": "-6955786537413359385",
+    "comp": "1",
+    "stream": "__heartbeat",
+    // this shell bolt's system task id
+    "task": -1,
+    "tuple": []
+}
+```
+
+When the subprocess receives a heartbeat tuple, it must send a `sync` command back to
+ShellBolt.
diff --git a/docs/NUMA.md b/docs/NUMA.md
new file mode 100644
index 00000000000..9ba3225fd49
--- /dev/null
+++ b/docs/NUMA.md
@@ -0,0 +1,97 @@
+---
+title: NUMA Support
+layout: documentation
+documentation: true
+---
+
+# Table of Contents
+1. [Introduction](#Introduction)
+2. [Architecture](#Architecture)
+3. [Resource Isolation Interface interaction](#RII-interaction)
+    1. [CgroupManager](#CgroupManager)
+    2. [DockerManager](#DockerManager)
+    3. [RuncLibManager](#RuncLibManager)
+4. [Configuring NUMA](#Configuring-NUMA)
+
+
+## Introduction
+
+Non Uniform Memory Access ([NUMA](https://www.cc.gatech.edu/~echow/ipcc/hpc-course/HPC-numa.pdf)) is a system architecture
+in which the host's resources are grouped into NUMA zones by cores and memory. Storm supports isolating/pinning worker processes to specific
+NUMA zones via the supervisor to take advantage of this resource isolation and avoid the penalty of cross-zone bus transfers.
+
+
+## Architecture
+
+Once Storm supervisors are configured for NUMA (see the section below), each sends multiple heartbeats - one for each NUMA zone.
+Each of these NUMA supervisors shares the same supervisor id prefix, with the NUMA id differentiating them.
+Nimbus, and by extension the scheduler, sees these heartbeats and views the host as multiple supervisors - one per configured NUMA zone.
+Nimbus schedules topologies and assignments accordingly. The supervisor reads all assignments matching its prefixed supervisor ids and then
+pins each worker to the NUMA zone indicated by the NUMA id in the assignment. The pinning depends on the Resource Isolation Interface used
+and is elaborated on in the following section.
+
+
+### Resource Isolation Interface interaction
+
+Each implementation of the Resource Isolation Interface (RII) should now implement NUMA pinning. The following are the current and soon-to-be-available
+implementations.
+
+#### CgroupManager
+
+On Linux hosts, the CgroupManager prefixes the worker launch command with the numactl command:
+
+```
+numactl --cpunodebind=<numaId> --membind=<numaId>
+```
+
+The worker is then bound to the NUMA zone's CPU cores and memory zone.
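+
+For illustration, the effective launch command for a worker pinned to NUMA zone 0 would be prefixed roughly like this (the java invocation is elided and illustrative):
+
+```
+numactl --cpunodebind=0 --membind=0 java -server ... org.apache.storm.daemon.worker.Worker ...
+```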
+
+#### DockerManager
+
+Will be updated upon adding Docker support
+
+#### RuncLibManager
+
+Will be updated upon adding Runc support
+
+
+### Configuring NUMA
+
+In the supervisor config, the following settings need to be set:
+
+```
+supervisor.numa.meta:
+    "0":  # NUMA zone id
+        numa.cores:  # Cores in the NUMA zone (can be determined by using the numastat command)
+            - 0
+            - 1
+            - 2
+            - 3
+            - 4
+            - 5
+            - 12
+            - 13
+            - 14
+            - 15
+            - 16
+
+        numa.generic.resources.map:  # Generic resources in the zone to be used for generic resource scheduling (optional)
+            network.resource.units: 50.0
+
+        numa.memory.mb: 42461  # Size of the memory zone
+        numa.ports:  # Ports to be assigned to workers pinned to the NUMA zone (this may include ports not specified in SUPERVISOR_SLOTS_PORTS)
+            - 6700
+            - 6701
+            - 6702
+            - 6703
+            - 6704
+            - 6705
+            - 6706
+            - 6707
+            - 6708
+            - 6709
+            - 6710
+            - 6711
+```
diff --git a/docs/OCI-support.md b/docs/OCI-support.md
new file mode 100644
index 00000000000..a44959ea1c3
--- /dev/null
+++ b/docs/OCI-support.md
@@ -0,0 +1,1710 @@
+---
+title: OCI/Squashfs Runtime
+layout: documentation
+documentation: true
+---
+
+# OCI/Squashfs Runtime for Workers Running in Containers
+
+OCI/Squashfs is a container runtime that allows topologies to run inside docker containers. However, unlike the existing
+Docker runtime, the images are fetched from HDFS rather than from the Docker registry, and images do not need to be pre-loaded
+into Docker on each node. Docker does not need to be installed on the nodes in order for this runtime to work.
+
+Note: This has only been tested on RHEL7.
+
+## Motivation
+
+#### Docker runtime drawbacks
+Using the current Docker runtime (see [Docker-support.md](Docker-support.md#Docker-Support)) has some drawbacks:
+
+##### Docker Daemons Dependency
+
+The Docker daemons `dockerd` and `containerd` must be running on the system in order for the Docker runtime to function.
+These daemons can get out of sync, which can cause nontrivial issues for the containers.
+
+##### Docker Registry Issues at Scale
+
+Using the Docker runtime on a large-scale Storm cluster can overwhelm the Docker registry. In practice this requires
+admins to pre-load a Docker image on all the cluster nodes in a controlled fashion before a large job requesting
+the image can run.
+
+##### Image Costs in Time and Space
+
+Docker stores each image layer as a tar.gz archive. In order to use the layer, the compressed archive must be unpacked
+into the node's filesystem. This can consume significant disk space, especially when the reliable image store location
+capacity is relatively small. In addition, unpacking an image layer takes time, especially when the layer is large or
+contains thousands of files. This additional time for unpacking delays container launch beyond the time needed to transfer
+the layer data over the network.
+
+#### OCI/Squashfs Runtime advantages
+
+The OCI/Squashfs runtime avoids the drawbacks listed above in the following ways.
+
+##### No Docker Dependencies on the Node
+
+Docker does not need to be installed on each node, nor is there a dependency on a daemon or service that needs to be started
+by an admin before containers can be launched. All that is required to be present on each node is an OCI-compatible runtime like
+`runc`.
+
+##### Leverages Distributed File Systems for Scale
+
+Images can be fetched via HDFS or other distributed file systems instead of the Docker registry. This prevents a large cluster from
+overwhelming a Docker registry when a big topology causes all of the nodes to request an image at once.
This also allows large clusters
+to run topologies more dynamically, as images would not need to be pre-loaded by admins on each node to prevent a large Docker registry
+image request storm.
+
+##### Smaller, Faster Images on the Node
+
+The new runtime handles layer localization directly, so layer formats other than tar archives can be supported. For example, each image layer
+can be converted to a squashfs image as part of copying the layers to HDFS. squashfs is a file system optimized for running directly on a
+compressed image. With squashfs layers, the layer data can remain compressed on the node, saving disk space. Container launch after layer
+localization is also faster, as the layers no longer need to be unpacked into a directory to become usable.
+
+
+## Prerequisite
+
+First you need to use the `docker-to-squash.py` script to download docker images and configs, convert the layers to squashfs files, and put them in a directory in HDFS, for example
+
+```bash
+python3 docker-to-squash.py pull-build-push-update --hdfs-root hdfs://hostname:port/containers \
+    docker.xxx.com:4443/hadoop-user-images/storm/rhel7:20201202-232133,storm/rhel7:dev_current --log DEBUG --bootstrap
+```
+
+With this command, all the layers belonging to this image will be converted to squashfs files and placed under the `./layers` directory
+under the directory specified by `--hdfs-root`;
+the manifest of this image will be placed under the `./manifests` directory with the sha256 value of the manifest content as its name;
+the config of this image will be placed under the `./config` directory with the sha256 value of the config content as its name;
+and the mapping from the image tag to the sha256 value of the manifest will be written to the image-tag-to-manifest file (named `image-tag-to-hash` in the example below).
+
+Note that `--hdfs-root` can be any directory on HDFS, as long as it matches the `storm.oci.image.hdfs.toplevel.dir` config.
+ +##### Example + +For example, the directory structure is like this: + +```bash +-bash-4.2$ hdfs dfs -ls /containers/* +Found 1 items +-r--r--r-- 3 hdfsqa hadoop 7877 2020-12-04 14:29 /containers/config/ef1ff2c7167a1a6cd01e106f51b84a4d400611ba971c53cbc28de7919515ca4e +-r--r--r-- 3 hdfsqa hadoop 160 2020-12-04 14:30 /containers/image-tag-to-hash +Found 7 items +-r--r--r-- 3 hdfsqa hadoop 84697088 2020-12-04 14:28 /containers/layers/152ee1d2cccea9dfe6393d2bdf9d077b67616b2b417b25eb74fc5ffaadcb96f5.sqsh +-r--r--r-- 3 hdfsqa hadoop 545267712 2020-12-04 14:28 /containers/layers/18ee671016a1bf3ecab07395d93c2cbecd352d59c497a1551e2074d64e1098d9.sqsh +-r--r--r-- 3 hdfsqa hadoop 12906496 2020-10-06 15:24 /containers/layers/1b73e9433ecca0a6bb152bd7525f2b7c233484d51c24f8a6ba483d5cfd3035dc.sqsh +-r--r--r-- 3 hdfsqa hadoop 4096 2020-12-04 14:29 /containers/layers/344224962010c03c9ca1f11a9bff0dfcc296ac46d0a55e4ff30a0ad13b9817af.sqsh +-r--r--r-- 3 hdfsqa hadoop 26091520 2020-10-06 15:22 /containers/layers/3692c3483ef6516fba685b316448e8aaf0fc10bb66818116edc8e5e6800076c7.sqsh +-r--r--r-- 3 hdfsqa hadoop 4096 2020-12-04 14:29 /containers/layers/8710a3d72f75b45c48ab6b9b67eb6d77caea3dac91a0c30e0831f591cba4887e.sqsh +-r--r--r-- 3 hdfsqa hadoop 121122816 2020-10-06 15:23 /containers/layers/ea067172a7138f035d89a5c378db6d66c1581d98b0497b21f256e04c3d2b5303.sqsh +Found 1 items +-r--r--r-- 3 hdfsqa hadoop 1793 2020-12-04 14:29 /containers/manifests/26fd443859325d5911f3be5c5e231dddca88ee0d526456c0c92dd794148d8585 +``` + +The `image-tag-to-manifest-file`: +```bash +-bash-4.2$ hdfs dfs -cat /containers/image-tag-to-hash +storm/rhel7:dev_current:26fd443859325d5911f3be5c5e231dddca88ee0d526456c0c92dd794148d8585#docker.xxx.com:4443/hadoop-user-images/storm/rhel7:20201202-232133 +``` + +The manifest file `26fd443859325d5911f3be5c5e231dddca88ee0d526456c0c92dd794148d8585`: +```json +{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 7877, + "digest": "sha256:ef1ff2c7167a1a6cd01e106f51b84a4d400611ba971c53cbc28de7919515ca4e" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 26858854, + "digest": "sha256:3692c3483ef6516fba685b316448e8aaf0fc10bb66818116edc8e5e6800076c7" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 123300113, + "digest": "sha256:ea067172a7138f035d89a5c378db6d66c1581d98b0497b21f256e04c3d2b5303" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 12927624, + "digest": "sha256:1b73e9433ecca0a6bb152bd7525f2b7c233484d51c24f8a6ba483d5cfd3035dc" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 567401434, + "digest": "sha256:18ee671016a1bf3ecab07395d93c2cbecd352d59c497a1551e2074d64e1098d9" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 85748864, + "digest": "sha256:152ee1d2cccea9dfe6393d2bdf9d077b67616b2b417b25eb74fc5ffaadcb96f5" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 186, + "digest": "sha256:344224962010c03c9ca1f11a9bff0dfcc296ac46d0a55e4ff30a0ad13b9817af" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 156, + "digest": "sha256:8710a3d72f75b45c48ab6b9b67eb6d77caea3dac91a0c30e0831f591cba4887e" + } + ] +} +``` + +And the config file `ef1ff2c7167a1a6cd01e106f51b84a4d400611ba971c53cbc28de7919515ca4e` (some 
of the content is omitted): +```json +{ + "architecture": "amd64", + "config": { + "Hostname": "", + "Domainname": "", + "User": "root", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "X_SCLS=rh-git218", + "LD_LIBRARY_PATH=/opt/rh/httpd24/root/usr/lib64", + "PATH=/opt/rh/rh-git218/root/usr/bin:/home/y/bin64:/home/y/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/home/y/share/yjava_jdk/java/bin", + "PERL5LIB=/opt/rh/rh-git218/root/usr/share/perl5/vendor_perl", + "LANG=en_US.UTF-8", + "LANGUAGE=en_US:en", + "LC_ALL=en_US.UTF-8", + "JAVA_HOME=/home/y/share/yjava_jdk/java" + ], + "Cmd": [ + "/bin/bash" + ], + "Image": "sha256:6977cd0735c96d14248e834f775373e40230c134b70f10163c05ce6c6c8873ca", + "Volumes": null, + "WorkingDir": "", + "Entrypoint": null, + "OnBuild": null, + "Labels": { + "name": "xxxxx" + } + }, + "container": "344ff1084dea3e0501a0d426e52c43cd589d6b29f33ab0915b7be8906b9aec41", + "container_config": { + "Hostname": "344ff1084dea", + "Domainname": "", + "User": "root", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "X_SCLS=rh-git218", + "LD_LIBRARY_PATH=/opt/rh/httpd24/root/usr/lib64", + "PATH=/opt/rh/rh-git218/root/usr/bin:/home/y/bin64:/home/y/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/home/y/share/yjava_jdk/java/bin", + "PERL5LIB=/opt/rh/rh-git218/root/usr/share/perl5/vendor_perl", + "LANG=en_US.UTF-8", + "LANGUAGE=en_US:en", + "LC_ALL=en_US.UTF-8", + "JAVA_HOME=/home/y/share/yjava_jdk/java" + ], + "Cmd": [ + "/bin/sh", + "-c" + ], + "Image": "sha256:6977cd0735c96d14248e834f775373e40230c134b70f10163c05ce6c6c8873ca", + "Volumes": null, + "WorkingDir": "", + "Entrypoint": null, + "OnBuild": null, + "Labels": { + "name": "xxxxx" + } + }, + "created": "2020-12-02T23:25:47.354704574Z", + "docker_version": "19.03.8", + "history": [ + { + "created": "2020-02-18T21:43:36.934503462Z", + "created_by": "/bin/sh" + }, + { + "created": "2020-02-18T21:45:05.729764427Z", + "created_by": "/bin/sh" + }, + { + "created": "2020-02-18T21:46:36.638896031Z", + "created_by": "/bin/sh" + }, + { + "created": "2020-12-02T23:21:54.595662813Z", + "created_by": "/bin/sh -c #(nop) USER root", + "empty_layer": true + }, + { + "created": "2020-12-02T23:25:45.822235539Z", + "created_by": "/bin/sh -c /opt/python/bin/pip3.6 install --no-cache-dir numpy scipy pandas requests setuptools scikit-learn matplotlib" + }, + { + "created": "2020-12-02T23:25:46.708884538Z", + "created_by": "/bin/sh -c #(nop) ENV JAVA_HOME=/home/y/share/yjava_jdk/java", + "empty_layer": true + }, + { + "created": "2020-12-02T23:25:46.770226108Z", + "created_by": "/bin/sh -c #(nop) ENV PATH=/opt/rh/rh-git218/root/usr/bin:/home/y/bin64:/home/y/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/home/y/share/yjava_jdk/java/bin", + "empty_layer": true + }, + { + "created": "2020-12-02T23:25:46.837263533Z", + "created_by": "/bin/sh -c #(nop) COPY file:33283617fbd796b25e53eaf4d26012eea1f610ff9acc0706f11281e86be440dc in /etc/krb5.conf " + }, + { + "created": "2020-12-02T23:25:47.237515768Z", + "created_by": "/bin/sh -c echo '7.7.4' \u003e /etc/hadoop-dockerfile-version" + } + ], + "os": "linux", + "rootfs": { + "type": "layers", + "diff_ids": [ + "sha256:9f627fdb0292afbe5e2eb96edc1b3a5d3a8f468e3acf1d29f1509509285c7341", + "sha256:83d2667f9458eaf719588a96bb63f2520bd377d29d52f6dbd4ff13c819c08037", 
+ "sha256:fcba5f49eef4f3d77d3e73e499a1a4e1914b3f20d903625d27c0aa3ab82f41a3", + "sha256:3bd4567d0726f5d6560b548bc0c0400e868f6a27067887a36edd7e8ceafff96c", + "sha256:ad56900a1f10e6ef96f17c7e8019384540ab1b34ccce6bda06675473b08d787e", + "sha256:ac0a645609f957ab9c4a8a62f8646e99f09a74ada54ed2eaca204c6e183c9ae8", + "sha256:9bf10102fc145156f4081c2cacdbadab5816dce4f88eb02881ab739239d316e6" + ] + } +} +``` + +Note: To use the `docker-to-squash.py`, you need to install [skopeo](https://github.com/containers/skopeo), [jq](https://stedolan.github.io/jq/) and squashfs-tools. + + +## Configurations + +Then you need to set up storm with the following configs: + +| Setting | Description | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `storm.resource.isolation.plugin.enable` | set to `true` to enable isolation plugin. `storm.resource.isolation.plugin` determines which plugin to use. If this is set to `false`, `org.apache.storm.container.DefaultResourceIsolationManager` will be used. | +| `storm.resource.isolation.plugin` | set to `"org.apache.storm.container.oci.RuncLibContainerManager"` to enable OCI/Squash runtime support | +| `storm.oci.allowed.images` | An allowlist of docker images that can be used. Users can only choose a docker image from the list. +| `storm.oci.image` | The default docker image to be used if user doesn't specify which image to use. And it must belong to the `storm.oci.allowed.images` +| `topology.oci.image` | Topologies can specify which image to use. It must belong to the `storm.oci.allowed.images` | +| `storm.oci.cgroup.root` | The root path of cgroup for docker to use. On RHEL7, it should be "/sys/fs/cgroup". +| `storm.oci.cgroup.parent` | --cgroup-parent config for docker command. It must follow the constraints of docker commands. The path will be made as absolute path if it's a relative path because we saw some weird bugs ((the cgroup memory directory disappears after a while) when a relative path is used. +| `storm.oci.readonly.bindmounts` | A list of read only bind mounted directories. +| `storm.oci.readwrite.bindmounts` | A list of read-write bind mounted directories. +| `storm.oci.nscd.dir` | The directory of nscd (name service cache daemon), e.g. "/var/run/nscd/". nscd must be running so that profiling can work properly. +| `storm.oci.seccomp.profile` | Specify the seccomp Json file to be used as a seccomp filter +| `supervisor.worker.launcher` | Full path to the worker-launcher executable. +| `storm.oci.image.hdfs.toplevel.dir` | The HDFS location under which the oci image manifests, layers and configs directories exist. +| `storm.oci.image.tag.to.manifest.plugin` | The plugin to be used to get the image-tag to manifest mappings. +| `storm.oci.local.or.hdfs.image.tag.to.manifest.plugin.hdfs.hash.file` | The hdfs location of the image-tag to manifest mapping file. 
If `org.apache.storm.container.oci.LocalOrHdfsImageTagToManifestPlugin` is used as `storm.oci.image.tag.to.manifest.plugin`, either `storm.oci.local.or.hdfs.image.tag.to.manifest.plugin.hdfs.hash.file` or `storm.oci.local.or.hdfs.image.tag.to.manifest.plugin.local.hash.file` needs to be configured. +| `storm.oci.local.or.hdfs.image.tag.to.manifest.plugin.local.hash.file` | The local file system location where the image-tag to manifest mapping file exists. If `org.apache.storm.container.oci.LocalOrHdfsImageTagToManifestPlugin` is used as `storm.oci.image.tag.to.manifest.plugin`, either `storm.oci.local.or.hdfs.image.tag.to.manifest.plugin.hdfs.hash.file` or `storm.oci.local.or.hdfs.image.tag.to.manifest.plugin.local.hash.file` needs to be configured. +| `storm.oci.local.or.hdfs.image.tag.to.manifest.plugin.cache.refresh.interval.secs` | The interval in seconds between refreshing the image-tag to manifest mapping cache, used by `org.apache.storm.container.oci.LocalOrHdfsImageTagToManifestPlugin`.| +| `storm.oci.local.or.hdfs.image.tag.to.manifest.plugin.num.manifests.to.cache` | The number of manifests to cache, used by `org.apache.storm.container.oci.LocalOrHdfsImageTagToManifestPlugin`.| +| `storm.oci.manifest.to.resources.plugin` | The plugin to be used to get oci resource according to the manifest. +| `storm.oci.resources.localizer` | The plugin to use for oci resources localization. | +| `storm.oci.resources.local.dir` | The local directory for localized oci resources. | + +For example, +```bash +storm.resource.isolation.plugin: "org.apache.storm.container.oci.RuncLibContainerManager" + +storm.oci.allowed.images: + - "storm/rhel7:dev_current" + - "storm/rhel7:dev_previous" + - "storm/rhel7:dev_test" +storm.oci.image: "storm/rhel7:dev_current" + +storm.oci.cgroup.parent: "/storm" +storm.oci.cgroup.root: "/sys/fs/cgroup" +storm.oci.image.hdfs.toplevel.dir: "hdfs://host:port/containers/" +storm.oci.image.tag.to.manifest.plugin: "org.apache.storm.container.oci.LocalOrHdfsImageTagToManifestPlugin" +storm.oci.local.or.hdfs.image.tag.to.manifest.plugin.hdfs.hash.file: "hdfs://host:port/containers/image-tag-to-hash" +storm.oci.manifest.to.resources.plugin: "org.apache.storm.container.oci.HdfsManifestToResourcesPlugin" +storm.oci.readonly.bindmounts: + - "/home/y/lib64/storm" + - "/etc/krb5.conf" + +storm.oci.resources.localizer: "org.apache.storm.container.oci.HdfsOciResourcesLocalizer" +storm.oci.seccomp.profile: "/home/y/conf/storm/seccomp.json" +``` + +To use built-in plugins from `external/storm-hdfs-oci`, you need to build `external/storm-hdfs-oci` and copy `storm-hdfs-oci.jar` and its dependencies to the `extlib-daemon` directory. + +Additionally, if you want to access to secure hdfs, you also need to set the following configs. +``` +storm.hdfs.login.keytab +storm.hdfs.login.principal +``` + +For example, +``` +storm.hdfs.login.keytab: /etc/keytab +storm.hdfs.login.principal: primary/instance@REALM +``` + +## Implementation + +##### Launch a container + +The supervisor calls RuncLibContainerManager to launch the container and the worker inside the container. It will first call the `storm.oci.image.tag.to.manifest.plugin` +to fetch the mapping of image tag to manifest. Then it calls `storm.oci.manifest.to.resources.plugin` to get the list of resources to be downloaded and invokes +`storm.oci.resources.localizer` to download the config of the image and the layers of the image to a local directory. 
It then composes an `oci-config.json` (see the example in the Appendix) and
+invokes the worker-launcher to launch the container.
+
+The worker-launcher parses the `oci-config.json` file and does the necessary initialization and setup. It then creates /run/worker-launcher/layers/xxx/mnt directories
+and associates them with loopback devices, for example:
+
+```bash
+-bash-4.2$ cat /proc/mounts
+...
+/dev/loop3 /run/worker-launcher/layers/f7452c2657900c53da1a4f7e430485a267b89c7717466ee61ffefba85f690226/mnt squashfs ro,relatime 0 0
+/dev/loop4 /run/worker-launcher/layers/8156da43228752c7364b71dabba6aef6bd1cc081e9ea59cf92ea0f79fd8a50b6/mnt squashfs ro,relatime 0 0
+/dev/loop5 /run/worker-launcher/layers/c7c9b1d6df043edf307c49d75c7d2bc3df72f8dcaf7d17b733c97022387902e6/mnt squashfs ro,relatime 0 0
+/dev/loop6 /run/worker-launcher/layers/f0d08d5707855b02def8ac622a6c60203b380e31c6c237e5b691f5856594a3e7/mnt squashfs ro,relatime 0 0
+/dev/loop11 /run/worker-launcher/layers/34b0bc9c446a9be565fb50b04db1e9d1c1c4d14a22a885a7aba6981748b6635e/mnt squashfs ro,relatime 0 0
+/dev/loop12 /run/worker-launcher/layers/0ba001c025aa172a7d630914c75c1772228606f622e2c9d46a8fedf10774623e/mnt squashfs ro,relatime 0 0
+/dev/loop13 /run/worker-launcher/layers/a5e4e615565081e04eaf4c5ab5b20d37de271db704fc781c7b1e07c5dcdf96e5/mnt squashfs ro,relatime 0 0
+...
+
+```
+
+Then it mounts the layers, for example:
+```bash
+-bash-4.2$ mount
+...
+/home/y/var/storm/supervisor/oci-resources/layers/3692c3483ef6516fba685b316448e8aaf0fc10bb66818116edc8e5e6800076c7.sqsh on /run/worker-launcher/layers/f7452c2657900c53da1a4f7e430485a267b89c7717466ee61ffefba85f690226/mnt type squashfs (ro,relatime)
+/home/y/var/storm/supervisor/oci-resources/layers/ea067172a7138f035d89a5c378db6d66c1581d98b0497b21f256e04c3d2b5303.sqsh on /run/worker-launcher/layers/8156da43228752c7364b71dabba6aef6bd1cc081e9ea59cf92ea0f79fd8a50b6/mnt type squashfs (ro,relatime)
+/home/y/var/storm/supervisor/oci-resources/layers/1b73e9433ecca0a6bb152bd7525f2b7c233484d51c24f8a6ba483d5cfd3035dc.sqsh on /run/worker-launcher/layers/c7c9b1d6df043edf307c49d75c7d2bc3df72f8dcaf7d17b733c97022387902e6/mnt type squashfs (ro,relatime)
+/home/y/var/storm/supervisor/oci-resources/layers/18ee671016a1bf3ecab07395d93c2cbecd352d59c497a1551e2074d64e1098d9.sqsh on /run/worker-launcher/layers/f0d08d5707855b02def8ac622a6c60203b380e31c6c237e5b691f5856594a3e7/mnt type squashfs (ro,relatime)
+/home/y/var/storm/supervisor/oci-resources/layers/152ee1d2cccea9dfe6393d2bdf9d077b67616b2b417b25eb74fc5ffaadcb96f5.sqsh on /run/worker-launcher/layers/34b0bc9c446a9be565fb50b04db1e9d1c1c4d14a22a885a7aba6981748b6635e/mnt type squashfs (ro,relatime)
+/home/y/var/storm/supervisor/oci-resources/layers/344224962010c03c9ca1f11a9bff0dfcc296ac46d0a55e4ff30a0ad13b9817af.sqsh on /run/worker-launcher/layers/0ba001c025aa172a7d630914c75c1772228606f622e2c9d46a8fedf10774623e/mnt type squashfs (ro,relatime)
+/home/y/var/storm/supervisor/oci-resources/layers/8710a3d72f75b45c48ab6b9b67eb6d77caea3dac91a0c30e0831f591cba4887e.sqsh on /run/worker-launcher/layers/a5e4e615565081e04eaf4c5ab5b20d37de271db704fc781c7b1e07c5dcdf96e5/mnt type squashfs (ro,relatime)
+...
+```
+
+It creates the rootfs and mounts the overlay filesystem (with lowerdir, upperdir, and workdir) for the worker with the command
+```bash
+mount -t overlay overlay -o lowerdir=/lower1:/lower2:/lower3,upperdir=/upper,workdir=/work /merged
+```
+
+```bash
+-bash-4.2$ mount
+...
+overlay on /run/worker-launcher/6703-1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb/rootfs type overlay (rw,relatime,lowerdir=/run/worker-launcher/layers/a5e4e615565081e04eaf4c5ab5b20d37de271db704fc781c7b1e07c5dcdf96e5/mnt:/run/worker-launcher/layers/0ba001c025aa172a7d630914c75c1772228606f622e2c9d46a8fedf10774623e/mnt:/run/worker-launcher/layers/34b0bc9c446a9be565fb50b04db1e9d1c1c4d14a22a885a7aba6981748b6635e/mnt:/run/worker-launcher/layers/f0d08d5707855b02def8ac622a6c60203b380e31c6c237e5b691f5856594a3e7/mnt:/run/worker-launcher/layers/c7c9b1d6df043edf307c49d75c7d2bc3df72f8dcaf7d17b733c97022387902e6/mnt:/run/worker-launcher/layers/8156da43228752c7364b71dabba6aef6bd1cc081e9ea59cf92ea0f79fd8a50b6/mnt:/run/worker-launcher/layers/f7452c2657900c53da1a4f7e430485a267b89c7717466ee61ffefba85f690226/mnt,upperdir=/run/worker-launcher/6703-1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb/upper,workdir=/run/worker-launcher/6703-1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb/work)
+...
+```
+
+It then produces a `config.json` (see the example in the Appendix) under the `/home/y/var/storm/workers/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb` directory and launches the container with
+the command
+```bash
+/usr/bin/runc run -d \
+    --pid-file /home/y/var/storm/workers/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb/artifacts/container-1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb.pid \
+    -b /home/y/var/storm/workers/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb \
+    6703-1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb
+```
+
+##### Kill a container
+
+To kill a container, `RuncLibContainerManager` sends the `SIGTERM` or `SIGKILL` signal to the container process. It then invokes the worker-launcher to unmount the mounts and clean up the directories.
+The worker-launcher will invoke `runc delete container-id` to delete the container at the end.
+
+
+## Profile the processes inside the container
+If you have sudo permission, you can also run `sudo nsenter --target <container-pid> --pid --mount --setuid <uid> --setgid <gid>` to enter the container.
+Then you can run `jstack`, `jmap`, etc. inside the container. `<container-pid>` is the pid of the container process on the host;
+it can be obtained by running the `runc list` command.
+
+## Seccomp security profiles
+
+You can set `storm.oci.seccomp.profile` to restrict the actions available within the container. If it's not set, the container runs without
+restrictions. You can use the provided `conf/seccomp.json.example` or you can specify your own `seccomp.json` file.
+ + +## Appendix + +##### Example oci-config.json file +```json +{ + "version": "0.1", + "username": "username1", + "containerId": "6703-1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb", + "pidFile": "/home/y/var/storm/workers/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb/artifacts/container-1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb.pid", + "containerScriptPath": "/home/y/var/storm/workers/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb/storm-worker-script.sh", + "layers": [ + { + "mediaType": "application/vnd.squashfs", + "path": "/home/y/var/storm/supervisor/oci-resources/layers/3692c3483ef6516fba685b316448e8aaf0fc10bb66818116edc8e5e6800076c7.sqsh" + }, + { + "mediaType": "application/vnd.squashfs", + "path": "/home/y/var/storm/supervisor/oci-resources/layers/ea067172a7138f035d89a5c378db6d66c1581d98b0497b21f256e04c3d2b5303.sqsh" + }, + { + "mediaType": "application/vnd.squashfs", + "path": "/home/y/var/storm/supervisor/oci-resources/layers/1b73e9433ecca0a6bb152bd7525f2b7c233484d51c24f8a6ba483d5cfd3035dc.sqsh" + }, + { + "mediaType": "application/vnd.squashfs", + "path": "/home/y/var/storm/supervisor/oci-resources/layers/18ee671016a1bf3ecab07395d93c2cbecd352d59c497a1551e2074d64e1098d9.sqsh" + }, + { + "mediaType": "application/vnd.squashfs", + "path": "/home/y/var/storm/supervisor/oci-resources/layers/152ee1d2cccea9dfe6393d2bdf9d077b67616b2b417b25eb74fc5ffaadcb96f5.sqsh" + }, + { + "mediaType": "application/vnd.squashfs", + "path": "/home/y/var/storm/supervisor/oci-resources/layers/344224962010c03c9ca1f11a9bff0dfcc296ac46d0a55e4ff30a0ad13b9817af.sqsh" + }, + { + "mediaType": "application/vnd.squashfs", + "path": "/home/y/var/storm/supervisor/oci-resources/layers/8710a3d72f75b45c48ab6b9b67eb6d77caea3dac91a0c30e0831f591cba4887e.sqsh" + } + ], + "reapLayerKeepCount": 100, + "ociRuntimeConfig": { + "mounts": [ + { + "destination": "/home/y/lib64/storm", + "type": "bind", + "source": "/home/y/lib64/storm", + "options": [ + "ro", + "rbind", + "rprivate" + ] + }, + { + "destination": "/etc/krb5.conf", + "type": "bind", + "source": "/etc/krb5.conf", + "options": [ + "ro", + "rbind", + "rprivate" + ] + }, + { + "destination": "/etc/resolv.conf", + "type": "bind", + "source": "/etc/resolv.conf", + "options": [ + "ro", + "rbind", + "rprivate" + ] + }, + { + "destination": "/etc/hostname", + "type": "bind", + "source": "/etc/hostname", + "options": [ + "ro", + "rbind", + "rprivate" + ] + }, + { + "destination": "/etc/hosts", + "type": "bind", + "source": "/etc/hosts", + "options": [ + "ro", + "rbind", + "rprivate" + ] + }, + { + "destination": "/var/run/nscd", + "type": "bind", + "source": "/var/run/nscd", + "options": [ + "ro", + "rbind", + "rprivate" + ] + }, + { + "destination": "/sys/fs/cgroup", + "type": "bind", + "source": "/sys/fs/cgroup", + "options": [ + "ro", + "rbind", + "rprivate" + ] + }, + { + "destination": "/home/y/var/storm/supervisor", + "type": "bind", + "source": "/home/y/var/storm/supervisor", + "options": [ + "ro", + "rbind", + "rprivate" + ] + }, + { + "destination": "/home/y/var/storm/workers/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb", + "type": "bind", + "source": "/home/y/var/storm/workers/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb", + "options": [ + "rw", + "rbind", + "rprivate" + ] + }, + { + "destination": "/home/y/var/storm/workers-artifacts/wc1-2-1608581491/6703", + "type": "bind", + "source": "/home/y/var/storm/workers-artifacts/wc1-2-1608581491/6703", + "options": [ + "rw", + "rbind", + "rprivate" + ] + }, + { + "destination": "/home/y/var/storm/workers-users/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb", + "type": 
"bind", + "source": "/home/y/var/storm/workers-users/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb", + "options": [ + "rw", + "rbind", + "rprivate" + ] + }, + { + "destination": "/home/y/var/storm/supervisor/stormdist/wc1-2-1608581491/shared_by_topology", + "type": "bind", + "source": "/home/y/var/storm/supervisor/stormdist/wc1-2-1608581491/shared_by_topology", + "options": [ + "rw", + "rbind", + "rprivate" + ] + }, + { + "destination": "/tmp", + "type": "bind", + "source": "/home/y/var/storm/workers/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb/tmp", + "options": [ + "rw", + "rbind", + "rprivate" + ] + } + ], + "process": { + "cwd": "/home/y/var/storm/workers/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb", + "env": [ + "X_SCLS=rh-git218", + "LD_LIBRARY_PATH=/opt/rh/httpd24/root/usr/lib64", + "PATH=/opt/rh/rh-git218/root/usr/bin:/home/y/bin64:/home/y/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/home/y/share/yjava_jdk/java/bin", + "PERL5LIB=/opt/rh/rh-git218/root/usr/share/perl5/vendor_perl", + "LANG=en_US.UTF-8", + "LANGUAGE=en_US:en", + "LC_ALL=en_US.UTF-8", + "JAVA_HOME=/home/y/share/yjava_jdk/java", + "LD_LIBRARY_PATH=/home/y/var/storm/supervisor/stormdist/wc1-2-1608581491/resources/Linux-amd64:/home/y/var/storm/supervisor/stormdist/wc1-2-1608581491/resources:/home/y/lib64:/usr/local/lib64:/usr/lib64:/lib64:" + ], + "args": [ + "bash", + "/home/y/var/storm/workers/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb/storm-worker-script.sh" + ] + }, + "linux": { + "cgroupsPath": "/storm/6703-1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb", + "resources": { + "cpu": { + "quota": 140000, + "period": 100000 + } + }, + "seccomp": { + "defaultAction": "SCMP_ACT_ERRNO", + "syscalls": [ + { + "names": [ + "accept", + "accept4", + "access", + "alarm", + "alarm", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_getres", + "clock_gettime", + "clock_nanosleep", + "close", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsetxattr", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + "fstatfs64", + "fsync", + "ftruncate", + "ftruncate64", + "futex", + "futimesat", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "get_robust_list", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "get_thread_area", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "ioctl", + "io_destroy", + "io_getevents", + "ioprio_get", + "ioprio_set", + "io_setup", + "io_submit", + "ipc", + "kill", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "_llseek", + "lremovexattr", + "lseek", + "lsetxattr", + "lstat", + "lstat64", + 
"madvise", + "mbind", + "memfd_create", + "mincore", + "mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedsend", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + "munlockall", + "munmap", + "nanosleep", + "newfstatat", + "_newselect", + "open", + "openat", + "pause", + "pipe", + "pipe2", + "poll", + "ppoll", + "prctl", + "pread64", + "preadv", + "prlimit64", + "pselect6", + "pwrite64", + "pwritev", + "read", + "readahead", + "readlink", + "readlinkat", + "readv", + "recv", + "recvfrom", + "recvmmsg", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + "setpriority", + "setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "set_robust_list", + "setsid", + "setsockopt", + "set_thread_area", + "set_tid_address", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigreturn", + "socket", + "socketcall", + "socketpair", + "splice", + "stat", + "stat64", + "statfs", + "statfs64", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "syslog", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timerfd_create", + "timerfd_gettime", + "timerfd_settime", + "timer_getoverrun", + "timer_gettime", + "timer_settime", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "uname", + "unlink", + "unlinkat", + "utime", + "utimensat", + "utimes", + "vfork", + "vmsplice", + "wait4", + "waitid", + "waitpid", + "write", + "writev", + "mount", + "umount2", + "reboot", + "name_to_handle_at", + "unshare" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 0, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 8, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 4294967295, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "arch_prctl" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "modify_ldt" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "clone" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ] + } + ] + } + } + } +} +``` + +##### 
Example config.json file +```json +{ + "ociVersion": "1.0.0", + "hostname": "hostname1", + "root": { + "path": "/run/worker-launcher/6703-1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb/rootfs", + "readonly": true + }, + "process": { + "args": [ + "bash", + "/home/y/var/storm/workers/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb/storm-worker-script.sh" + ], + "cwd": "/home/y/var/storm/workers/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb", + "env": [ + "X_SCLS=rh-git218", + "LD_LIBRARY_PATH=/opt/rh/httpd24/root/usr/lib64", + "PATH=/opt/rh/rh-git218/root/usr/bin:/home/y/bin64:/home/y/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/home/y/share/yjava_jdk/java/bin", + "PERL5LIB=/opt/rh/rh-git218/root/usr/share/perl5/vendor_perl", + "LANG=en_US.UTF-8", + "LANGUAGE=en_US:en", + "LC_ALL=en_US.UTF-8", + "JAVA_HOME=/home/y/share/yjava_jdk/java", + "LD_LIBRARY_PATH=/home/y/var/storm/supervisor/stormdist/wc1-2-1608581491/resources/Linux-amd64:/home/y/var/storm/supervisor/stormdist/wc1-2-1608581491/resources:/home/y/lib64:/usr/local/lib64:/usr/lib64:/lib64:" + ], + "noNewPrivileges": true, + "user": { + "uid": 31315, + "gid": 100, + "additionalGids": [ + 5548 + ] + } + }, + "mounts": [ + { + "source": "proc", + "destination": "/proc", + "type": "proc" + }, + { + "source": "tmpfs", + "destination": "/dev", + "type": "tmpfs", + "options": [ + "nosuid", + "strictatime", + "mode=755", + "size=65536k" + ] + }, + { + "source": "devpts", + "destination": "/dev/pts", + "type": "devpts", + "options": [ + "nosuid", + "noexec", + "newinstance", + "ptmxmode=0666", + "mode=0620", + "gid=5" + ] + }, + { + "source": "shm", + "destination": "/dev/shm", + "type": "tmpfs", + "options": [ + "nosuid", + "noexec", + "nodev", + "mode=1777", + "size=65536k" + ] + }, + { + "source": "mqueue", + "destination": "/dev/mqueue", + "type": "mqueue", + "options": [ + "nosuid", + "noexec", + "nodev" + ] + }, + { + "source": "sysfs", + "destination": "/sys", + "type": "sysfs", + "options": [ + "nosuid", + "noexec", + "nodev", + "ro" + ] + }, + { + "source": "cgroup", + "destination": "/sys/fs/cgroup", + "type": "cgroup", + "options": [ + "nosuid", + "noexec", + "nodev", + "relatime", + "ro" + ] + }, + { + "destination": "/home/y/lib64/storm", + "type": "bind", + "source": "/home/y/lib64/storm", + "options": [ + "ro", + "rbind", + "rprivate" + ] + }, + { + "destination": "/etc/krb5.conf", + "type": "bind", + "source": "/etc/krb5.conf", + "options": [ + "ro", + "rbind", + "rprivate" + ] + }, + { + "destination": "/etc/resolv.conf", + "type": "bind", + "source": "/etc/resolv.conf", + "options": [ + "ro", + "rbind", + "rprivate" + ] + }, + { + "destination": "/etc/hostname", + "type": "bind", + "source": "/etc/hostname", + "options": [ + "ro", + "rbind", + "rprivate" + ] + }, + { + "destination": "/etc/hosts", + "type": "bind", + "source": "/etc/hosts", + "options": [ + "ro", + "rbind", + "rprivate" + ] + }, + { + "destination": "/var/run/nscd", + "type": "bind", + "source": "/var/run/nscd", + "options": [ + "ro", + "rbind", + "rprivate" + ] + }, + + { + "destination": "/sys/fs/cgroup", + "type": "bind", + "source": "/sys/fs/cgroup", + "options": [ + "ro", + "rbind", + "rprivate" + ] + }, + { + "destination": "/home/y/var/storm/supervisor", + "type": "bind", + "source": "/home/y/var/storm/supervisor", + "options": [ + "ro", + "rbind", + "rprivate" + ] + }, + { + "destination": "/home/y/var/storm/workers/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb", + "type": "bind", + "source": "/home/y/var/storm/workers/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb", + 
"options": [ + "rw", + "rbind", + "rprivate" + ] + }, + { + "destination": "/home/y/var/storm/workers-artifacts/wc1-2-1608581491/6703", + "type": "bind", + "source": "/home/y/var/storm/workers-artifacts/wc1-2-1608581491/6703", + "options": [ + "rw", + "rbind", + "rprivate" + ] + }, + { + "destination": "/home/y/var/storm/workers-users/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb", + "type": "bind", + "source": "/home/y/var/storm/workers-users/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb", + "options": [ + "rw", + "rbind", + "rprivate" + ] + }, + { + "destination": "/home/y/var/storm/supervisor/stormdist/wc1-2-1608581491/shared_by_topology", + "type": "bind", + "source": "/home/y/var/storm/supervisor/stormdist/wc1-2-1608581491/shared_by_topology", + "options": [ + "rw", + "rbind", + "rprivate" + ] + }, + { + "destination": "/tmp", + "type": "bind", + "source": "/home/y/var/storm/workers/1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb/tmp", + "options": [ + "rw", + "rbind", + "rprivate" + ] + } + ], + "linux": { + "cgroupsPath": "/storm/6703-1a23ca4b-6062-4d08-8ac3-b09e7d35e7cb", + "resources": { + "devices": [ + { + "access": "rwm", + "allow": false + } + ], + "cpu": { + "quota": 140000, + "period": 100000 + } + }, + "namespaces": [ + { + "type": "pid" + }, + { + "type": "ipc" + }, + { + "type": "uts" + }, + { + "type": "mount" + } + ], + "maskedPaths": [ + "/proc/kcore", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware" + ], + "readonlyPaths": [ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" + ], + "seccomp": { + "defaultAction": "SCMP_ACT_ERRNO", + "syscalls": [ + { + "names": [ + "accept", + "accept4", + "access", + "alarm", + "alarm", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_getres", + "clock_gettime", + "clock_nanosleep", + "close", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsetxattr", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + "fstatfs64", + "fsync", + "ftruncate", + "ftruncate64", + "futex", + "futimesat", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "get_robust_list", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "get_thread_area", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "ioctl", + "io_destroy", + "io_getevents", + "ioprio_get", + "ioprio_set", + "io_setup", + "io_submit", + "ipc", + "kill", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "_llseek", + "lremovexattr", + "lseek", + "lsetxattr", + "lstat", 
+ "lstat64", + "madvise", + "mbind", + "memfd_create", + "mincore", + "mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedsend", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + "munlockall", + "munmap", + "nanosleep", + "newfstatat", + "_newselect", + "open", + "openat", + "pause", + "pipe", + "pipe2", + "poll", + "ppoll", + "prctl", + "pread64", + "preadv", + "prlimit64", + "pselect6", + "pwrite64", + "pwritev", + "read", + "readahead", + "readlink", + "readlinkat", + "readv", + "recv", + "recvfrom", + "recvmmsg", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + "setpriority", + "setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "set_robust_list", + "setsid", + "setsockopt", + "set_thread_area", + "set_tid_address", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigreturn", + "socket", + "socketcall", + "socketpair", + "splice", + "stat", + "stat64", + "statfs", + "statfs64", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "syslog", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timerfd_create", + "timerfd_gettime", + "timerfd_settime", + "timer_getoverrun", + "timer_gettime", + "timer_settime", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "uname", + "unlink", + "unlinkat", + "utime", + "utimensat", + "utimes", + "vfork", + "vmsplice", + "wait4", + "waitid", + "waitpid", + "write", + "writev", + "mount", + "umount2", + "reboot", + "name_to_handle_at", + "unshare" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 0, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 8, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 4294967295, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "arch_prctl" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "modify_ldt" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "clone" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ] + } + ] + } + } +} 
+```
+
+## CGroup Metrics
+
+Runc internally uses cgroups to control resources for containers. The CGroup Metrics described at [cgroups_in_storm.md](cgroups_in_storm.md#CGroup-Metrics) still apply, except for CGroupCpuGuarantee. To get the CGroup cpu guarantee, use CGroupCpuGuaranteeByCfsQuota instead.
\ No newline at end of file
diff --git a/docs/Pacemaker.md b/docs/Pacemaker.md
new file mode 100644
index 00000000000..75fa7485543
--- /dev/null
+++ b/docs/Pacemaker.md
@@ -0,0 +1,112 @@
+---
+title: Pacemaker
+layout: documentation
+documentation: true
+---
+
+
+### Introduction
+Pacemaker is a storm daemon designed to process heartbeats from workers. As Storm is scaled up, ZooKeeper begins to become a bottleneck due to the high volume of writes from workers doing heartbeats. Lots of writes to disk and too much traffic across the network are generated as ZooKeeper tries to maintain consistency.
+
+Because heartbeats are of an ephemeral nature, they do not need to be persisted to disk or synced across nodes; an in-memory store will do. This is the role of Pacemaker. Pacemaker functions as a simple in-memory key/value store with ZooKeeper-like, directory-style keys and byte array values.
+
+The corresponding Pacemaker client is a plugin for the `ClusterState` interface, `org.apache.storm.cluster.PaceMakerStateStorageFactory`. Heartbeat calls are funneled by the `ClusterState` produced by `pacemaker_state_factory` into the Pacemaker daemon, while other set/get operations are forwarded to ZooKeeper.
+
+------
+
+### Configuration
+
+ - `pacemaker.servers` : The hosts that the Pacemaker daemons are running on
+ - `pacemaker.port` : The port that Pacemaker will listen on
+ - `pacemaker.max.threads` : Maximum number of threads the Pacemaker daemon will use to handle requests.
+ - `pacemaker.childopts` : Any JVM parameters that need to go to Pacemaker.
+ - `pacemaker.auth.method` : The authentication method that is used (more info below)
+
+#### Example
+
+To get Pacemaker up and running, set the following option in the cluster config on all nodes:
+```
+storm.cluster.state.store: "org.apache.storm.cluster.PaceMakerStateStorageFactory"
+```
+
+The Pacemaker servers also need to be set on all nodes:
+```
+pacemaker.servers:
+    - somehost.mycompany.com
+    - someotherhost.mycompany.com
+```
+
+And then start all of your daemons (including Pacemaker):
+```
+$ storm pacemaker
+```
+
+The Storm cluster should now be pushing all worker heartbeats through Pacemaker.
+
+### Security
+
+Currently digest (password-based) and Kerberos security are supported. Security is currently only around reads, not writes. Writes may be performed by anyone, whereas reads may only be performed by authorized and authenticated users. This is an area for future development, as it leaves the cluster open to DoS attacks, but it prevents any sensitive information from reaching unauthorized eyes, which was the main goal.
+
+#### Digest
+To configure digest authentication, set `pacemaker.auth.method: DIGEST` in the cluster config on the nodes hosting Nimbus and Pacemaker.
+The nodes must also have `java.security.auth.login.config` set to point to a JAAS config file containing the following structure:
+```
+PacemakerDigest {
+    username="some username"
+    password="some password";
+};
+```
+
+Any node with these settings configured will be able to read from Pacemaker.
+Worker nodes need not have these configs set, and may keep `pacemaker.auth.method: NONE` set, since they do not need to read from the Pacemaker daemon.
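+
+For illustration, the digest-related settings on a Nimbus or Pacemaker node might be wired together like this (the JAAS file path and the use of childopts to point at it are assumptions of this sketch):
+
+```
+pacemaker.auth.method: DIGEST
+nimbus.childopts: "-Djava.security.auth.login.config=/etc/storm/jaas.conf"
+```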
+
+#### Kerberos
+To configure Kerberos authentication, set `pacemaker.auth.method: KERBEROS` in the cluster config on the nodes hosting Nimbus and Pacemaker.
+The nodes must also have `java.security.auth.login.config` set to point to a JAAS config.
+
+The JAAS config on Nimbus must look something like this:
+```
+PacemakerClient {
+    com.sun.security.auth.module.Krb5LoginModule required
+    useKeyTab=true
+    keyTab="/etc/keytabs/nimbus.keytab"
+    storeKey=true
+    useTicketCache=false
+    serviceName="pacemaker"
+    principal="nimbus@MY.COMPANY.COM";
+};
+
+```
+
+The JAAS config on Pacemaker must look something like this:
+```
+PacemakerServer {
+    com.sun.security.auth.module.Krb5LoginModule required
+    useKeyTab=true
+    keyTab="/etc/keytabs/pacemaker.keytab"
+    storeKey=true
+    useTicketCache=false
+    principal="pacemaker@MY.COMPANY.COM";
+};
+```
+
+ - The client's user principal in the `PacemakerClient` section on the Nimbus host must match the `nimbus.daemon.user` storm cluster config value.
+ - The client's `serviceName` value must match the server's user principal in the `PacemakerServer` section on the Pacemaker host.
+
+
+### Fault Tolerance
+
+Pacemaker runs as a single daemon instance, making it a potential Single Point of Failure.
+
+If Pacemaker becomes unreachable by Nimbus, through a crash or a network partition, the workers will continue to run, and Nimbus will repeatedly attempt to reconnect. Nimbus functionality will be disrupted, but the topologies themselves will continue to run.
+If the cluster is partitioned such that Nimbus and Pacemaker are on one side and some workers are on the other, those workers will not be able to heartbeat, and Nimbus will reschedule their tasks elsewhere. This is probably what we want to happen anyway.
+
+
+### ZooKeeper Comparison
+Compared to ZooKeeper, Pacemaker uses less CPU, less memory, and of course no disk for the same load, thanks to the lack of overhead from maintaining consistency between nodes.
+On Gigabit networking, there is a theoretical limit of about 6000 nodes. However, the real limit is likely around 2000-3000 nodes. These limits have not yet been tested.
+On a 270 supervisor cluster, fully scheduled with topologies, Pacemaker resource utilization was 70% of one core and nearly 1GiB of RAM on a machine with 4 `Intel(R) Xeon(R) CPU E5530 @ 2.40GHz` and 24GiB of RAM.
+
+Pacemaker now supports HA. Multiple Pacemaker instances can be used at once in a Storm cluster to allow massive scalability. Just include the names of the Pacemaker hosts in the `pacemaker.servers` config, and workers and Nimbus will start communicating with them. They're fault tolerant as well. The system keeps working as long as there is at least one Pacemaker instance left running - provided it can handle the load.
diff --git a/docs/Performance.md b/docs/Performance.md
new file mode 100644
index 00000000000..df8f3e09dff
--- /dev/null
+++ b/docs/Performance.md
@@ -0,0 +1,179 @@
+---
+title: Performance Tuning
+layout: documentation
+documentation: true
+---
+
+Latency, throughput and resource consumption are the three key dimensions involved in performance tuning.
+In the following sections, we discuss the settings that can be used to tune along these dimensions and understand the trade-offs.
+
+It is important to understand that these settings can vary depending on the topology, the type of hardware, and the number of hosts used by the topology.
+
+## 1. Buffer Size
+Spouts and Bolts operate asynchronously using message passing.
Message queues used for this purpose are of fixed but configurable size. Buffer size refers to the size of these queues. Every consumer has its own receive queue. Messages wait in the queue until the consumer is ready to process them.
+The queue will typically be almost empty or almost full, depending on whether the consumer is operating faster or slower than the rate at which producers are generating messages for it. Storm queues always have a single consumer and potentially multiple producers. There are two buffer size settings of interest:
+
+- `topology.executor.receive.buffer.size` : This is the size of the message queue employed for each spout and bolt executor.
+- `topology.transfer.buffer.size` : This is the size of the outbound message queue used for inter-worker messaging. This queue is referred to as the *Worker Transfer Queue*.
+
+**Note:** If the specified buffer size is not a power of 2, it is internally rounded up to the next power of 2.
+
+#### Guidance
+Very small message queues (size < 1024) are likely to hamper throughput by not providing enough isolation between the consumer and producer. This can undermine the asynchronous nature of the processing, as the producers are likely to find the downstream queue full.
+
+Very large message queues are also undesirable as a way of dealing with slow consumers; it is better to employ more consumers (i.e. bolts) on additional CPU cores instead. If queues are large and often full, messages will wait longer in these queues at each step of the processing, leading to poor latency being reported on the Storm UI. Large queues also imply higher memory consumption, especially if the queues are typically full.
+
+
+## 2. Batch Size
+Producers can either write a batch of messages to the consumer's queue or write each message individually. This batch size can be configured.
+Inserting messages in batches into downstream queues helps reduce the number of synchronization operations required for the inserts. Consequently, this helps achieve higher throughput. However, it may sometimes take a little time for the buffer to fill up before it is flushed into the downstream queue. This implies that the buffered messages will take longer to become visible to the downstream consumer that is waiting to process them. This can increase the average end-to-end latency for these messages. The latency can get very bad if the batch sizes are large and the topology is not experiencing high traffic.
+
+- `topology.producer.batch.size` : The batch size for writes into the receive queue of any spout/bolt is controlled via this setting. This setting impacts the communication within a worker process. Each upstream producer maintains a separate batch for a component's receive queue. So if two spout instances are writing to the same downstream bolt instance, each of the spout instances will have to maintain a separate batch.
+
+- `topology.transfer.batch.size` : Messages that are destined for a spout/bolt running on a different worker process are sent to a queue called the **Worker Transfer Queue**. The Worker Transfer Thread is responsible for draining the messages in this queue and sending them to the appropriate worker process over the network. This setting controls the batch size for writes into the Worker Transfer Queue. This impacts the communication between worker processes.
+
+#### Guidance
+
+**For Low latency:** Set batch size to 1. This basically disables batching.
This is likely to reduce peak sustainable throughput under heavy traffic, but is not likely to impact throughput much under low/medium traffic situations.
+
+**For High throughput:** Set batch size > 1. Try values like 10, 100, 1000 or even higher and see what yields the best throughput for the topology. Beyond a certain point the throughput is likely to get worse.
+
+**Varying throughput:** Topologies often experience fluctuating amounts of incoming traffic over the day. Other topologies may experience higher traffic in some paths and lower throughput in other paths simultaneously. If latency is not a concern, a small batch size (e.g. 10) in conjunction with the right flush frequency may provide a reasonable compromise for such scenarios. For meeting stricter latency SLAs, consider setting it to 1.
+
+
+## 3. Flush Tuple Frequency
+In low/medium traffic situations, or when the batch size is too large, the batches may take too long to fill up, and consequently the messages could take an unacceptably long time to become visible to downstream components. In such cases, periodic flushing of batches is necessary to keep the messages moving and avoid compromising latencies when batching is enabled.
+
+When batching has been enabled, special messages called *flush tuples* are inserted periodically into the receive queues of all spout and bolt instances. This causes each spout/bolt instance to flush all its outstanding batches to their respective downstream components.
+
+`topology.flush.tuple.freq.millis` : This setting controls how often the flush tuples are generated. Flush tuples are not generated if this configuration is set to 0 or if (`topology.producer.batch.size`=1 and `topology.transfer.batch.size`=1).
+
+
+#### Guidance
+The flushing interval can be used as a tool to retain the higher throughput benefits of batching while avoiding batched messages getting stuck for too long waiting for their batch to fill. Preferably this value should be larger than the average execute latencies of the bolts in the topology. Trying to flush the queues more frequently than the amount of time it takes to produce the messages may hurt performance. Understanding the average execute latencies of each bolt will help determine the average number of messages in the queues between two flushes.
+
+**For Low latency:** A smaller value helps achieve tighter latency SLAs.
+
+**For High throughput:** When trying to maximize throughput under high traffic situations, the batches are likely to get filled and flushed automatically. To optimize for such cases, this value can be set to a higher number.
+
+**Varying throughput:** If latency is not a concern, a larger value will optimize for high traffic situations. For meeting tighter SLAs set this to lower values.
+
+
+## 4. Wait Strategy
+Wait strategies are used to conserve CPU usage by trading off some latency and throughput. They are applied in the following situations:
+
+4.1 **Spout Wait:** In low/no traffic situations, a spout's nextTuple() may not produce any new emits. To prevent invoking the spout's nextTuple too often, this wait strategy is used between nextTuple() calls, allowing the spout's executor thread to idle and conserve CPU. The spout wait strategy is also used when the `topology.max.spout.pending` limit has been reached and ACKers are enabled. Select a strategy using `topology.spout.wait.strategy`. Configure the chosen wait strategy using one of the `topology.spout.wait.*` settings.
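+
+As a hedged illustration, a *storm.yaml* tuned for batching with periodic flushing and the progressive spout wait strategy (described below) might combine the settings discussed so far; the values here are examples, not recommendations:
+```
+topology.producer.batch.size: 10
+topology.transfer.batch.size: 10
+topology.flush.tuple.freq.millis: 1000
+topology.spout.wait.strategy: "org.apache.storm.policy.WaitStrategyProgressive"
+```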
+
+4.2 **Bolt Wait:** When a bolt polls its receive queue for new messages to process, it is possible that the queue is empty. This typically happens in low/no traffic situations or when the upstream spout/bolt is inherently slower. This wait strategy is used in such cases. It avoids high CPU usage due to the bolt continuously checking a typically empty queue. Select a strategy using `topology.bolt.wait.strategy`. The chosen strategy can be further configured using the `topology.bolt.wait.*` settings.
+
+4.3 **Backpressure Wait:** Select a strategy using `topology.backpressure.wait.strategy`. When a spout/bolt tries to write to a downstream component's receive queue, there is a possibility that the queue is full. In such cases, the write needs to be retried. This wait strategy is used to induce some idling in-between re-attempts to conserve CPU. The chosen strategy can be further configured using the `topology.backpressure.wait.*` settings.
+
+
+#### Built-in wait strategies:
+These wait strategies are available for use with all of the above mentioned wait situations.
+
+- **ProgressiveWaitStrategy** : This strategy can be used for Bolt Wait or Backpressure Wait situations. Set the strategy to 'org.apache.storm.policy.WaitStrategyProgressive' to select this wait strategy. This is a dynamic wait strategy that enters into progressively deeper states of CPU conservation if the Backpressure Wait or Bolt Wait situations persist. It has 3 levels of idling and allows configuring how long to stay at each level:
+
+  1. Level 1 / No Waiting - The first few times it will return immediately. This does not conserve any CPU. The number of times it remains in this state is configured using `topology.spout.wait.progressive.level1.count`, `topology.bolt.wait.progressive.level1.count` or `topology.backpressure.wait.progressive.level1.count`, depending on which situation it is being used in.
+
+  2. Level 2 / Park Nanos - In this state it disables the current thread for thread scheduling purposes for 1 nanosecond using LockSupport.parkNanos(). This puts the CPU in a minimal conservation state. It remains in this state for `topology.spout.wait.progressive.level2.count`, `topology.bolt.wait.progressive.level2.count` or `topology.backpressure.wait.progressive.level2.count` iterations.
+
+  3. Level 3 / Thread.sleep() - In this level it calls Thread.sleep() with the value specified in `topology.spout.wait.progressive.level3.sleep.millis`, `topology.bolt.wait.progressive.level3.sleep.millis` or `topology.backpressure.wait.progressive.level3.sleep.millis`. This is the most CPU conserving level and it remains in this level for the remaining iterations.
+
+
+- **ParkWaitStrategy** : This strategy can be used for Bolt Wait or Backpressure Wait situations. Set the strategy to `org.apache.storm.policy.WaitStrategyPark` to use this. This strategy disables the current thread for thread scheduling purposes by calling LockSupport.parkNanos(). The amount of park time is configured using either `topology.bolt.wait.park.microsec` or `topology.backpressure.wait.park.microsec`, based on the wait situation it is used in. Setting the park time to 0 effectively disables invocation of LockSupport.parkNanos, and this mode can be used to achieve busy polling (which may improve latency and/or throughput at the cost of high CPU utilization even when idle).
+
+
+## 5. Max.spout.pending
+The setting `topology.max.spout.pending` limits the number of un-ACKed tuples at the spout level.
Once a spout reaches this limit, the spout's nextTuple() method will not be called until some ACKs are received for the outstanding emits. This setting does not have any effect if ACKing is disabled. It is a spout throttling mechanism which can impact throughput and latency. Setting it to null disables it for storm-core topologies. The impact on throughput is dependent on the topology and its concurrency (workers/executors), so experimentation is necessary to determine the optimal setting. Latency and memory consumption are typically expected to increase as this value gets higher.
+
+
+## 6. Load Aware messaging
+When load aware messaging is enabled (default), shuffle grouping takes additional factors into consideration for message routing. The impact of this on performance is dependent on the topology and its deployment footprint (i.e. distribution over processes and machines). Consequently it is useful to assess the impact of setting `topology.disable.loadaware.messaging` to `true` or `false` for your specific case.
+
+
+## 7. Sampling Rate
+The sampling rate is used to control how often certain metrics are computed on the spout and bolt executors. This is configured using `topology.stats.sample.rate`. Setting it to 1 means the stats are computed for every emitted message. As an example, to sample once every 1000 messages it can be set to 0.001. It may be possible to improve throughput and latency by reducing the sampling rate.
+
+
+## 8. Budgeting CPU cores for Executors
+There are four main types of executors (i.e. threads) to take into account when budgeting CPU cores for them: Spout Executors, Bolt Executors, Worker Transfer (handles outbound messages) and NettyWorker (handles inbound messages).
+The first two are used to run spout, bolt and acker instances. The Worker Transfer thread is used to serialize and send messages to other workers (in multi-worker mode).
+
+Executors that are expected to remain busy, either because they are handling a lot of messages, or because their processing is inherently CPU intensive, should be allocated 1 physical core each. Allocating logical cores (instead of physical), or less than 1 physical core, for CPU intensive executors increases CPU contention and performance can suffer. Executors that are not expected to be busy can be allocated a smaller fraction of a physical core (or even logical cores). It may not be economical to allocate a full physical core to executors that are not likely to saturate the CPU.
+
+The *system bolt* generally processes very few messages per second, and so requires very little CPU (typically less than 10% of a physical core).
+
+
+## 9. Garbage Collection
+The choice of GC is an important concern for topologies that are latency or throughput sensitive. It is recommended to try both the CMS and G1 collectors. Performance characteristics of the collectors can change between single and multiworker modes and are dependent on hardware characteristics such as the number of CPUs and memory localities. The number of GC threads can also affect performance. Sometimes fewer GC threads can yield better performance. It is advisable to select a collector and tune it by mimicking anticipated peak data rates on hardware similar to what is used in production.
+
+
+## 10. Scaling out with Single Worker mode
+Communication between executors within a worker process is very fast, as there is neither a need to serialize and deserialize messages nor does it involve communicating over the network stack.
In multiworker mode, messages often cross worker process boundaries. For performance sensitive cases, if it is possible to configure a topology to run as many single-worker instances (for ex. one worker per input partition) rather than one multiworker instance, it may yield significantly better throughput and latency on the same hardware. The downside to this approach is that it adds the overhead of monitoring and managing many instances rather than one multiworker instance.
diff --git a/docs/Project-ideas.md b/docs/Project-ideas.md
new file mode 100644
index 00000000000..1af48058112
--- /dev/null
+++ b/docs/Project-ideas.md
@@ -0,0 +1,6 @@
+---
+layout: documentation
+---
+ * **DSLs for non-JVM languages:** These DSLs should be all-inclusive and not require any Java for the creation of topologies, spouts, or bolts. Since topologies are [Thrift](https://thrift.apache.org/) structs, Nimbus is a Thrift service, and bolts can be written in any language, this is possible.
+ * **Online machine learning algorithms:** Something like [Mahout](http://mahout.apache.org/) but for online algorithms
+ * **Suite of performance benchmarks:** These benchmarks should test Storm's performance on CPU and IO intensive workloads. There should be benchmarks for different classes of applications, such as stream processing (where throughput is the priority) and distributed RPC (where latency is the priority).
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 00000000000..e92193a7eff
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,113 @@
+# Apache Storm Website and Documentation
+This is the source for the release-specific part of the Apache Storm website and documentation. It is statically generated using [jekyll](http://jekyllrb.com).
+
+## Generate Javadoc
+
+You have to generate javadoc in the project root before generating the documentation site.
+
+```
+mvn javadoc:javadoc -Dnotimestamp=true
+mvn javadoc:aggregate -DreportOutputDirectory=./docs/ -DdestDir=javadocs -Dnotimestamp=true
+```
+
+You also need to create a distribution package with a gpg certificate. Please refer to the instructions [here](https://github.com/apache/storm/blob/master/DEVELOPER.md#packaging).
+
+## Site Generation
+First install jekyll (assuming you have ruby installed):
+
+```
+gem install jekyll
+```
+
+Generate the site, and start a server locally:
+```
+cd docs
+jekyll serve -w
+```
+
+The `-w` option tells jekyll to watch for changes to files and regenerate the site automatically when any content changes.
+
+Point your browser to http://localhost:4000
+
+By default, jekyll will generate the site in a `_site` directory.
+
+This will only show the portion of the documentation that is specific to this release.
+
+## Adding a new release to the website
+In order to add a new release, you must have committer access to Storm's subversion repository at https://svn.apache.org/repos/asf/storm/site.
+
+Release documentation is placed under the releases directory, named after the release version. Most metadata about the release will be generated automatically from the name by a jekyll plugin, or can be set by placing entries in the _data/releases.yml file.
+
+To create a new release, run the following from the main git directory:
+
+```
+mvn javadoc:javadoc -Dnotimestamp=true
+mvn javadoc:aggregate -DreportOutputDirectory=./docs/ -DdestDir=javadocs -Dnotimestamp=true
+mkdir ${path_to_svn}/releases/${release_name}
+#Copy everything over, and compare checksums, except for things that are part of the site,
+# and are not release specific, like the _* directories that are jekyll specific
+# assets/ css/ and README.md
+rsync -ac --delete --exclude _\* --exclude assets --exclude css --exclude README.md ./docs/ ${path_to_svn}/releases/${release_name}
+cd ${path_to_svn}
+svn add releases/${release_name}
+svn commit
+```
+
+To publish a new release, run:
+
+```
+cd ${path_to_svn}
+jekyll build -d publish/
+svn add publish/ #Add any new files
+svn commit
+```
+
+## How release specific docs work
+
+Release specific documentation is controlled by a jekyll plugin [releases.rb](./_plugins/releases.rb)
+
+If the plugin is running from the git repo, the config `storm_release_only` is set and the plugin will treat all of the markdown files as release specific files.
+
+If it is running from the subversion repository, it will look in the releases directory for release specific docs.
+
+http://svn.apache.org/viewvc/storm/site/releases/
+
+Each subdirectory is named after the release in question. The "current" release is pointed to by a symlink in that directory called `current`.
+
+The plugin sets three configs for each release page.
+
+ * version - the version number of the release/directory
+ * git-tree-base - a link to the directory on github that this version lives in
+ * git-blob-base - a link to where this version lives on github, to be used when pointing to files
+
+If `storm_release_only` is set for the project, the version is determined from the maven pom.xml and the branch is the current branch in git. If it is not set, the version is determined by the name of the sub-directory and the branch is assumed to be `"v#{version}"`, which corresponds with our naming conventions. For SNAPSHOT releases you will need to override this in `_data/releases.yml`
+
+The plugin also augments the `site.data.releases` dataset.
+Each release in the list includes the following, and each can be set in `_data/releases.yml` to override what is automatically generated by the plugin.
+
+ * git-tag-or-branch - tag or branch name on github/apache/storm
+ * git-tree-base - a link to the directory on github that this version lives in
+ * git-blob-base - a link to where this version lives on github, to be used when pointing to files
+ * base-name - name of the release files to download, without the .tar.gz
+ * has-download - if this is an official release and a download link should be created.
+
+So if you wanted to create a link to a file on github inside the release specific docs you would create a link like
+
+```
+[LICENSE]({{page.git-blob-base}}/LICENSE)
+```
+
+If you wanted to create a maven string to tell people what dependency to use you would do something like
+
+```
+<dependency>
+    ...
+    <version>{{version}}</version>
+</dependency>
+```
+
+If you want to refer to a javadoc for the current release, use a relative path. It will be in the javadocs subdirectory.
+
+```
+[TopologyBuilder](javadocs/org/apache/storm/topology/TopologyBuilder.html)
+```
diff --git a/docs/Rationale.md b/docs/Rationale.md
new file mode 100644
index 00000000000..45ff396b641
--- /dev/null
+++ b/docs/Rationale.md
@@ -0,0 +1,33 @@
+---
+title: Rationale
+layout: documentation
+documentation: true
+---
+The past decade has seen a revolution in data processing. MapReduce, Hadoop, and related technologies have made it possible to store and process data at scales previously unthinkable. Unfortunately, these data processing technologies are not realtime systems, nor are they meant to be. There's no hack that will turn Hadoop into a realtime system; realtime data processing has a fundamentally different set of requirements than batch processing.
+
+However, realtime data processing at massive scale is becoming more and more of a requirement for businesses. The lack of a "Hadoop of realtime" has become the biggest hole in the data processing ecosystem.
+
+Storm fills that hole.
+
+Before Storm, you would typically have to manually build a network of queues and workers to do realtime processing. Workers would process messages off a queue, update databases, and send new messages to other queues for further processing. Unfortunately, this approach has serious limitations:
+
+1. **Tedious**: You spend most of your development time configuring where to send messages, deploying workers, and deploying intermediate queues. The realtime processing logic that you care about corresponds to a relatively small percentage of your codebase.
+2. **Brittle**: There's little fault-tolerance. You're responsible for keeping each worker and queue up.
+3. **Painful to scale**: When the message throughput gets too high for a single worker or queue, you need to partition how the data is spread around. You need to reconfigure the other workers to know the new locations to send messages. This introduces moving parts and new pieces that can fail.
+
+Although the queues and workers paradigm breaks down for large numbers of messages, message processing is clearly the fundamental paradigm for realtime computation. The question is: how do you do it in a way that doesn't lose data, scales to huge volumes of messages, and is dead-simple to use and operate?
+
+Storm satisfies these goals.
+
+## Why Storm is important
+
+Storm exposes a set of primitives for doing realtime computation. Just as MapReduce greatly eases the writing of parallel batch processing jobs, Storm's primitives greatly ease the writing of parallel realtime computation.
+
+The key properties of Storm are:
+
+1. **Extremely broad set of use cases**: Storm can be used for processing messages and updating databases (stream processing), doing a continuous query on data streams and streaming the results into clients (continuous computation), parallelizing an intense query like a search query on the fly (distributed RPC), and more. Storm's small set of primitives satisfy a stunning number of use cases.
+2. **Scalable**: Storm scales to massive numbers of messages per second. To scale a topology, all you have to do is add machines and increase the parallelism settings of the topology. As an example of Storm's scale, one of Storm's initial applications processed 1,000,000 messages per second on a 10 node cluster, including hundreds of database calls per second as part of the topology. Storm's usage of Zookeeper for cluster coordination makes it scale to much larger cluster sizes.
+3.
**Guarantees no data loss**: A realtime system must have strong guarantees about data being successfully processed. A system that drops data has a very limited set of use cases. Storm guarantees that every message will be processed, and this is in direct contrast with other systems like S4.
+4. **Extremely robust**: Unlike systems like Hadoop, which are notorious for being difficult to manage, Storm clusters just work. It is an explicit goal of the Storm project to make the user experience of managing Storm clusters as painless as possible.
+5. **Fault-tolerant**: If there are faults during execution of your computation, Storm will reassign tasks as necessary. Storm makes sure that a computation can run forever (or until you kill the computation).
+6. **Programming language agnostic**: Robust and scalable realtime processing shouldn't be limited to a single platform. Storm topologies and processing components can be defined in any language, making Storm accessible to nearly anyone.
diff --git a/docs/Resource_Aware_Scheduler_overview.md b/docs/Resource_Aware_Scheduler_overview.md
new file mode 100644
index 00000000000..7fb0e31e26c
--- /dev/null
+++ b/docs/Resource_Aware_Scheduler_overview.md
@@ -0,0 +1,513 @@
+---
+title: Resource Aware Scheduler
+layout: documentation
+documentation: true
+---
+
+# Introduction
+
+The purpose of this document is to provide a description of the Resource Aware Scheduler for the Storm distributed real-time computation system. This document provides a high level description of the resource aware scheduler in Storm. Some of the benefits of using a resource aware scheduler on top of Storm are outlined in the following presentation at Hadoop Summit 2016:
+
+http://www.slideshare.net/HadoopSummit/resource-aware-scheduling-in-apache-storm
+
+# Table of Contents
+1. [Using Resource Aware Scheduler](#Using-Resource-Aware-Scheduler)
+2. [API Overview](#API-Overview)
+    1. [Setting Memory Requirement](#Setting-Memory-Requirement)
+    2. [Shared Memory Requirement](#Setting-Shared-Memory)
+    3. [Setting CPU Requirement](#Setting-CPU-Requirement)
+    4. [Limiting the Heap Size per Worker (JVM) Process](#Limiting-the-Heap-Size-per-Worker-(JVM)Process)
+    5. [Setting Available Resources on Node](#Setting-Available-Resources-on-Node)
+    6. [Other Configurations](#Other-Configurations)
+3. [Topology Priorities and Per User Resource](#Topology-Priorities-and-Per-User-Resource)
+    1. [Setup](#Setup)
+    2. [Specifying Topology Priority](#Specifying-Topology-Priority)
+    3. [Specifying Scheduling Strategy](#Specifying-Scheduling-Strategy)
+    4. [Specifying Topology Prioritization Strategy](#Specifying-Topology-Prioritization-Strategy)
+4. [Profiling Resource Usage](#Profiling-Resource-Usage)
+5. [Enhancements on original DefaultResourceAwareStrategy](#Enhancements-on-original-DefaultResourceAwareStrategy)
+
+
+## Using Resource Aware Scheduler
+
+The user can switch to using the Resource Aware Scheduler by setting the following in *conf/storm.yaml*
+```
+    storm.scheduler: "org.apache.storm.scheduler.resource.ResourceAwareScheduler"
+```
+
+## API Overview
+
+For use with Trident, please see the [Trident RAS API](Trident-RAS-API.html)
+
+For a Storm Topology, the user can now specify the amount of resources a topology component (i.e. Spout or Bolt) requires to run a single instance of the component. The user can specify the resource requirement for a topology component by using the following API calls.
+
+
+### Setting Memory Requirement
+
+API to set component memory requirement:
+```
+    public T setMemoryLoad(Number onHeap, Number offHeap)
+```
+Parameters:
+* Number onHeap – The amount of on heap memory an instance of this component will consume in megabytes
+* Number offHeap – The amount of off heap memory an instance of this component will consume in megabytes
+
+The user also has the option to just specify the on heap memory requirement if the component does not have an off heap memory need.
+```
+    public T setMemoryLoad(Number onHeap)
+```
+Parameters:
+* Number onHeap – The amount of on heap memory an instance of this component will consume
+
+If no value is provided for offHeap, 0.0 will be used. If no value is provided for onHeap, or if the API is never called for a component, the default value will be used.
+
+Example of Usage:
+```
+    SpoutDeclarer s1 = builder.setSpout("word", new TestWordSpout(), 10);
+    s1.setMemoryLoad(1024.0, 512.0);
+    builder.setBolt("exclaim1", new ExclamationBolt(), 3)
+                .shuffleGrouping("word").setMemoryLoad(512.0);
+```
+The entire memory requested for this topology is 16.5 GB. That is from 10 spouts with 1GB on heap memory and 0.5 GB off heap memory each, and 3 bolts with 0.5 GB on heap memory each.
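+
+Spelling out the arithmetic behind that total:
+```
+    10 spouts × (1024 MB + 512 MB) + 3 bolts × 512 MB = 15,360 MB + 1,536 MB = 16,896 MB ≈ 16.5 GB
+```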
+
+### Shared Memory
+
+In some cases you may have memory that is shared between components. It may be as simple as a large static data structure, or as complex as static data that is memory mapped into a bolt and is shared across workers. In any case, you can specify your shared memory request by creating one of `SharedOffHeapWithinNode`, `SharedOffHeapWithinWorker`, or `SharedOnHeap` and adding it to the bolts and spouts that use that shared memory.
+
+Example of Usage:
+
+```
+    builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word")
+        .addSharedMemory(new SharedOnHeap(100, "exclaim-cache"));
+```
+
+In the above example all of the "exclaim1" bolts in a worker will share 100MB of memory.
+
+```
+    builder.setBolt("lookup", new LookupBolt(), 3).shuffleGrouping("spout")
+        .addSharedMemory(new SharedOffHeapWithinNode(500, "static-lookup"));
+```
+
+In this example all "lookup" bolts on a given node will share 500 MB of memory off heap.
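+
+For completeness, a similar sketch for the worker-scoped variant named above; the "parser" bolt and its class are hypothetical placeholders:
+
+```
+    builder.setBolt("parser", new ParserBolt(), 2).shuffleGrouping("spout")
+        .addSharedMemory(new SharedOffHeapWithinWorker(200, "parse-buffer"));
+```
+
+Here each worker running "parser" executors would account for 200 MB of shared off heap memory, rather than each node.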
+
+### Setting CPU Requirement
+
+API to set component CPU requirement:
+```
+    public T setCPULoad(Double amount)
+```
+Parameters:
+* Number amount – The amount of CPU an instance of this component will consume.
+
+Currently, the amount of CPU resources a component requires or is available on a node is represented by a point system. CPU usage is a difficult concept to define. Different CPU architectures perform differently depending on the task at hand. They are so complex that expressing all of that in a single precise portable number is impossible. Instead we take a convention over configuration approach and are primarily concerned with rough levels of CPU usage, while still providing the possibility to specify amounts more fine grained.
+
+By convention a CPU core typically will get 100 points. If you feel that your processors are more or less powerful you can adjust this accordingly. Heavy tasks that are CPU bound will get 100 points, as they can consume an entire core. Medium tasks should get 50, light tasks 25, and tiny tasks 10. In some cases you have a task that spawns other threads to help with processing. These tasks may need to go above 100 points to express the amount of CPU they are using. If these conventions are followed, then for the common case of a single threaded task, the reported Capacity * 100 should be the number of CPU points that the task needs.
+
+Example of Usage:
+```
+    SpoutDeclarer s1 = builder.setSpout("word", new TestWordSpout(), 10);
+    s1.setCPULoad(15.0);
+    builder.setBolt("exclaim1", new ExclamationBolt(), 3)
+                .shuffleGrouping("word").setCPULoad(10.0);
+    builder.setBolt("exclaim2", new HeavyBolt(), 1)
+                    .shuffleGrouping("exclaim1").setCPULoad(450.0);
+```
+ +### Limiting the Heap Size per Worker (JVM) Process + +``` + public void setTopologyWorkerMaxHeapSize(Number size) +``` + +Parameters: +* Number size – The memory limit a worker process will be allocated in megabytes + +The user can limit the amount of memory resources the resource aware scheduler allocates to a single worker on a per topology basis by using the above API. This API is in place so that the users can spread executors to multiple workers. However, spreading executors to multiple workers may increase the communication latency since executors will not be able to use Disruptor Queue for intra-process communication. + +Example of Usage: +``` + Config conf = new Config(); + conf.setTopologyWorkerMaxHeapSize(512.0); +``` + +
+
+### Setting Available Resources on Node
+
+A storm administrator can specify node resource availability by modifying the *conf/storm.yaml* file located in the storm home directory of that node.
+
+A storm administrator can specify how much available memory a node has in megabytes by adding the following to *storm.yaml*
+```
+    supervisor.memory.capacity.mb: [amount]
+```
+A storm administrator can also specify how much available CPU resource a node has by adding the following to *storm.yaml*
+```
+    supervisor.cpu.capacity: [amount]
+```
+
+Note that the amount the user can specify for the available CPU is represented using a point system, as discussed earlier.
+
+Example of Usage:
+```
+    supervisor.memory.capacity.mb: 20480.0
+    supervisor.cpu.capacity: 100.0
+```
+
+### Other Configurations
+
+The user can set some default configurations for the Resource Aware Scheduler in *conf/storm.yaml*:
+
+```
+    # default value if on heap memory requirement is not specified for a component
+    topology.component.resources.onheap.memory.mb: 128.0
+
+    # default value if off heap memory requirement is not specified for a component
+    topology.component.resources.offheap.memory.mb: 0.0
+
+    # default value if CPU requirement is not specified for a component
+    topology.component.cpu.pcore.percent: 10.0
+
+    # default value for the max heap size for a worker
+    topology.worker.max.heap.size.mb: 768.0
+```
+
+### Warning
+
+If Resource Aware Scheduling is enabled, it will dynamically calculate the number of workers and the `topology.workers` setting is ignored.
+
+## Topology Priorities and Per User Resource
+
+The Resource Aware Scheduler (RAS) also has multi-tenant capabilities, since many Storm users typically share a Storm cluster. The Resource Aware Scheduler can allocate resources on a per user basis. Each user can be guaranteed a certain amount of resources to run his or her topologies, and the Resource Aware Scheduler will meet those guarantees when possible. When the Storm cluster has extra free resources, the Resource Aware Scheduler will be able to allocate additional resources to users in a fair manner. The importance of topologies can also vary. Topologies can be used for actual production or just experimentation, so the Resource Aware Scheduler will take into account the importance of a topology when determining the order in which to schedule topologies or when to evict topologies.
+
+### Setup
+
+The resource guarantees of a user can be specified in *conf/user-resource-pools.yaml*. Specify the resource guarantees of a user in the following format:
+```
+    resource.aware.scheduler.user.pools:
+        [UserId]
+            cpu: [Amount of Guaranteed CPU Resources]
+            memory: [Amount of Guaranteed Memory Resources]
+```
+An example of what *user-resource-pools.yaml* can look like:
+```
+    resource.aware.scheduler.user.pools:
+        jerry:
+            cpu: 1000
+            memory: 8192.0
+        derek:
+            cpu: 10000.0
+            memory: 32768
+        bobby:
+            cpu: 5000.0
+            memory: 16384.0
+```
+Please note that the specified amount of guaranteed CPU and memory can be either an integer or a double
+
+### Specifying Topology Priority
+
+Topology priorities can range from 0-29. The topology priorities will be partitioned into several priority levels that may each contain a range of priorities.
+For example we can create a priority level mapping:
+
+    PRODUCTION => 0 – 9
+    STAGING => 10 – 19
+    DEV => 20 – 29
+
+Thus, each priority level contains 10 sub priorities. Users can set the priority level of a topology by using the following API
+```
+    conf.setTopologyPriority(int priority)
+```
+Parameters:
+* priority – an integer representing the priority of the topology
+
+Please note that the 0-29 range is not a hard limit. Thus, a user can set a priority number that is higher than 29. However, the property that a higher priority number means lower importance still holds.
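+
+As a brief illustrative sketch, a production topology under the example mapping above might be submitted like this (the topology name and builder are placeholders):
+```
+    Config conf = new Config();
+    conf.setTopologyPriority(0);  // top of the PRODUCTION (0-9) band
+    StormSubmitter.submitTopology("my-production-topology", conf, builder.createTopology());
+```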
+
+### Specifying Scheduling Strategy
+
+A user can specify on a per topology basis what scheduling strategy to use. Users can implement the IStrategy interface and define new strategies to schedule specific topologies. This pluggable interface was created since we realize different topologies might have different scheduling needs. A user can set the topology strategy within the topology definition by using the API:
+```
+    public void setTopologyStrategy(Class<? extends IStrategy> clazz)
+```
+Parameters:
+* clazz – The strategy class that implements the IStrategy interface
+
+Example Usage:
+```
+    conf.setTopologyStrategy(org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.class);
+```
+A default scheduling strategy is provided. The DefaultResourceAwareStrategy is implemented based on the scheduling algorithm in the original paper describing resource aware scheduling in Storm:
+
+Peng, Boyang, Mohammad Hosseini, Zhihao Hong, Reza Farivar, and Roy Campbell. "R-storm: Resource-aware scheduling in storm." In Proceedings of the 16th Annual Middleware Conference, pp. 149-161. ACM, 2015.
+
+http://dl.acm.org/citation.cfm?id=2814808
+
+**Please Note: Enhancements have been made on top of the original scheduling strategy as described in the paper. Please see the section "Enhancements on original DefaultResourceAwareStrategy"**
+
+### Specifying Topology Prioritization Strategy
+
+The order of scheduling and eviction is determined by a pluggable interface through which the cluster owner can define how topologies should be scheduled. For the owner to define his or her own prioritization strategy, he or she needs to implement the ISchedulingPriorityStrategy interface. A user can set the scheduling priority strategy by setting the `DaemonConfig.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY` to point to the class that implements the strategy. For instance:
+```
+    resource.aware.scheduler.priority.strategy: "org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy"
+```
+
+Topologies are scheduled starting at the beginning of the list returned by this plugin. If there are not enough resources to schedule the topology, others are evicted starting at the end of the list. Eviction stops when there are no lower priority topologies left to evict.
+
+**DefaultSchedulingPriorityStrategy**
+
+In the past the order of scheduling was based on the distance between a user's current resource allocation and his or her guaranteed allocation.
+
+We currently use a slightly different approach. We simulate scheduling the highest priority topology for each user and score the topology for each of the resources using the formula
+
+```
+(Requested + Assigned - Guaranteed)/Available
+```
+
+Where
+
+ * `Requested` is the resource requested by this topology (or an approximation of it for complex requests like shared memory)
+ * `Assigned` is the resources already assigned by the simulation.
+ * `Guaranteed` is the resource guarantee for this user
+ * `Available` is the amount of that resource currently available in the cluster.
+
+This gives a score that is negative for requests within the guarantee and a score that is positive for requests that are not within the guarantee.
+
+To combine different resources the maximum of all the individual resource scores is used. This guarantees that if a user would go over a guarantee for a single resource it would not be offset by being under guarantee on any other resources.
+
+For Example:
+
+Assume we have to schedule the following topologies.
+
+|ID|User|CPU|Memory|Priority|
+|---|----|---|------|-------|
+|A-1|A|100|1,000|1|
+|A-2|A|100|1,000|10|
+|B-1|B|100|1,000|1|
+|B-2|B|100|1,000|10|
+
+The cluster as a whole has 300 CPU and 4,000 Memory.
+
+User A is guaranteed 100 CPU and 1,000 Memory. User B is guaranteed 200 CPU and 1,500 Memory. The scores for the most important (lowest priority number) topologies for each user would be
+
+```
+A-1 Score = max(CPU: (100 + 0 - 100)/300, MEM: (1,000 + 0 - 1,000)/4,000) = 0
+B-1 Score = max(CPU: (100 + 0 - 200)/300, MEM: (1,000 + 0 - 1,500)/4,000) = -0.125
+```
+
+`B-1` has the lowest score so it would be the highest priority topology to schedule. In the next round the scores would be
+
+```
+A-1 Score = max(CPU: (100 + 0 - 100)/200, MEM: (1,000 + 0 - 1,000)/3,000) = 0
+B-2 Score = max(CPU: (100 + 100 - 200)/200, MEM: (1,000 + 1,000 - 1,500)/3,000) = 0.167
+```
+
+`A-1` has the lowest score now so it would be the next highest priority topology to schedule.
+
+This process would be repeated until all of the topologies are ordered, even if there are no resources left on the cluster to schedule a topology.
+
+**FIFOSchedulingPriorityStrategy**
+
+The FIFO strategy is intended more for test or staging clusters where users are running integration tests or doing development work.
Topologies in these situations tend to be short lived and at times a user may forget that they are running a topology at all.
+
+To be as fair as possible to users running short lived topologies, the `FIFOSchedulingPriorityStrategy` extends the `DefaultSchedulingPriorityStrategy` so that any negative score (a.k.a. a topology that fits within a user's guarantees) remains unchanged, but positive scores are replaced with the up-time of the topology.
+
+This respects the guarantees of a user, but at the same time it gives the most recently launched topology priority for the rest of the resources. Older topologies, which have probably been forgotten about, are then least likely to get resources.
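+
+Selecting the FIFO strategy is, as a sketch, a matter of pointing the priority strategy config shown earlier at the FIFO class (assuming it lives in the same package as the default strategy):
+```
+    resource.aware.scheduler.priority.strategy: "org.apache.storm.scheduler.resource.strategies.priority.FIFOSchedulingPriorityStrategy"
+```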
+
+## Profiling Resource Usage
+
+Figuring out resource usage for your topology:
+
+To get an idea of how much memory/CPU your topology is actually using you can add the following to your topology launch code.
+
+```
+    //Log all storm metrics
+    conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class);
+
+    //Add in per worker CPU measurement
+    Map<String, String> workerMetrics = new HashMap<String, String>();
+    workerMetrics.put("CPU", "org.apache.storm.metrics.sigar.CPUMetric");
+    conf.put(Config.TOPOLOGY_WORKER_METRICS, workerMetrics);
+```
+
+The CPU metrics will require you to add
+
+```
+    <dependency>
+        <groupId>org.apache.storm</groupId>
+        <artifactId>storm-metrics</artifactId>
+        <version>1.0.0</version>
+    </dependency>
+```
+
+as a topology dependency (1.0.0 or higher).
+
+You can then go to your topology on the UI, turn on the system metrics, and find the log that the LoggingMetricsConsumer is writing to. It will output results in the log like this:
+
+```
+    1454526100 node1.nodes.com:6707 -1:__system CPU {user-ms=74480, sys-ms=10780}
+    1454526100 node1.nodes.com:6707 -1:__system memory/nonHeap {unusedBytes=2077536, virtualFreeBytes=-64621729, initBytes=2555904, committedBytes=66699264, maxBytes=-1, usedBytes=64621728}
+    1454526100 node1.nodes.com:6707 -1:__system memory/heap {unusedBytes=573861408, virtualFreeBytes=694644256, initBytes=805306368, committedBytes=657719296, maxBytes=778502144, usedBytes=83857888}
+```
+
+The metrics with -1:__system are generally metrics for the entire worker. In the example above that worker is running on node1.nodes.com:6707. These metrics are collected every 60 seconds. For the CPU you can see that over the 60 seconds this worker used 74480 + 10780 = 85260 ms of CPU time. This is equivalent to 85260/60000 or about 1.5 cores.
+
+The memory usage is similar, but look at the usedBytes. offHeap is 64621728 or about 62MB, and onHeap is 83857888 or about 80MB, but you should know what you set your heap to in each of your workers already. How do you divide this up per bolt/spout? That is a bit harder and may require some trial and error from your end.
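+
+Tying this back to the CPU point convention from the "Setting CPU Requirement" section, a rough conversion sketch from the measured numbers above:
+```
+    // 60-second window, user + sys CPU time from the log line above
+    double cpuMs = 74480 + 10780;      // = 85260 ms of CPU time
+    double cores = cpuMs / 60000.0;    // ≈ 1.42 cores
+    double points = cores * 100;       // ≈ 142 CPU points for the whole worker
+```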
+
+## Enhancements on original DefaultResourceAwareStrategy
+
+The default resource aware scheduling strategy as described in the paper above has two main scheduling phases:
+
+1. Task Selection - Calculate the order in which the tasks/executors in a topology should be scheduled
+2. Node Selection - Given a task/executor, find a node to schedule the task/executor on.
+
+Enhancements have been made for both scheduling phases
+
+### Task Selection Enhancements
+
+Instead of using a breadth first traversal of the topology graph to create an ordering of components and their executors, a new heuristic is used that orders components by the number of in and out edges (potential connections) of the component. This was found to be a more effective way to co-locate executors that communicate with each other and reduce the network latency.
+
+
+### Node Selection Enhancements
+
+Node selection comes down to first selecting which rack (server rack) and then which node on that rack to choose. The gist of this strategy in choosing a rack and node is to find the rack that has the "most" resources available and, within that rack, the node with the "most" free resources. The assumption we are making for this strategy is that the node or rack with the most free resources will have the highest probability of allowing us to co-locate the largest number of executors on the node or rack, to reduce network communication latency.
+
+Racks and nodes will be sorted from best choice to worst choice. When placing an executor, the strategy will iterate through all racks and nodes, starting from best to worst, before giving up. Racks and nodes will be sorted in the following manner:
+
+1. How many executors are already scheduled on the rack or node
+ -- This is done so we schedule executors close to executors that are already scheduled and running. If a topology partially crashed and a subset of the topology's executors need to be rescheduled, we want to reschedule these executors as close (network wise) as possible to the executors that are healthy and running.
+
+2. Subordinate resource availability, or the amount of "effective" resources on the rack or node
+ -- Please refer to the section on Subordinate Resource Availability
+
+3. Average of all the resource availability percentages
+ -- This is simply taking the average of the percent available (available resources on the node or rack divided by the available resources on the rack or cluster, respectively). This is only used when the "effective resources" for two objects (rack or node) are the same. Then we consider the average of all the percentages of resources as a metric for sorting. For example:
+```
+        Avail Resources:
+        node 1: CPU = 50 Memory = 1024 Slots = 20
+        node 2: CPU = 50 Memory = 8192 Slots = 40
+        node 3: CPU = 1000 Memory = 0 Slots = 0
+
+        Effective resources for nodes:
+        node 1 = 50 / (50+50+1000) = 0.045 (CPU bound)
+        node 2 = 50 / (50+50+1000) = 0.045 (CPU bound)
+        node 3 = 0 (memory and slots are 0)
+```
+Node 1 and node 2 have the same effective resources, but clearly node 2 has more resources (memory and slots) than node 1, and we would want to pick node 2 first since there is a higher probability we will be able to schedule more executors on it. This is what the phase 2 averaging does.
+
+Thus the sorting follows the following progression. Compare based on 1) and if equal then compare based on 2) and if equal compare based on 3) and if equal we break ties by arbitrarily assigning ordering based on comparing the ids of the node or rack.
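+
+A minimal Java sketch of that comparison chain (the `Candidate` type and its fields are hypothetical stand-ins for Storm's internal rack/node objects):
+```
+    import java.util.Comparator;
+
+    class Candidate {
+        String id;
+        int executorsScheduled;    // executors of this topology already on the rack/node
+        double effectiveResource;  // subordinate resource availability (next section)
+        double avgAvailability;    // average of the per-resource availability percentages
+    }
+
+    Comparator<Candidate> bestFirst = Comparator
+        .comparingInt((Candidate c) -> c.executorsScheduled).reversed()  // 1. co-locate with already-scheduled executors
+        .thenComparing(Comparator.comparingDouble((Candidate c) -> c.effectiveResource).reversed())  // 2. effective resources
+        .thenComparing(Comparator.comparingDouble((Candidate c) -> c.avgAvailability).reversed())    // 3. average availability
+        .thenComparing(c -> c.id);  // arbitrary tie-break on id
+```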
+
+**Subordinate Resource Availability**
+
+Originally the getBestClustering algorithm for RAS found the "best" rack based on which rack had the "most available" resources, by finding the rack with the biggest sum of available memory plus available CPU across all nodes in the rack. This method is not very accurate since memory and CPU usage are values on different scales and the values are not normalized. This method is also not effective since it does not consider the number of slots available, and it fails to identify racks that are not schedulable due to the exhaustion of one of the resources, be it memory, CPU, or slots. The previous method also does not consider failures of workers. When executors of a topology get unassigned and need to be scheduled again, the logic in getBestClustering may be inadequate since it will likely return a rack different from the one where the majority of the topology's executors were originally scheduled.
+
+The new strategy/algorithm to find the "best" rack or node, dubbed subordinate resource availability ordering (inspired by Dominant Resource Fairness), sorts racks and nodes by the subordinate (not dominant) resource availability.
+
+For example, given 4 racks with the following resource availabilities:
+```
+    //generate some that has a lot of memory but little of cpu
+    rack-3 Avail [ CPU 100.0 MEM 200000.0 Slots 40 ] Total [ CPU 100.0 MEM 200000.0 Slots 40 ]
+    //generate some supervisors that are depleted of one resource
+    rack-2 Avail [ CPU 0.0 MEM 80000.0 Slots 40 ] Total [ CPU 0.0 MEM 80000.0 Slots 40 ]
+    //generate some that has a lot of cpu but little of memory
+    rack-4 Avail [ CPU 6100.0 MEM 10000.0 Slots 40 ] Total [ CPU 6100.0 MEM 10000.0 Slots 40 ]
+    //generate another rack of supervisors with less resources than rack-0
+    rack-1 Avail [ CPU 2000.0 MEM 40000.0 Slots 40 ] Total [ CPU 2000.0 MEM 40000.0 Slots 40 ]
+    //best rack to choose
+    rack-0 Avail [ CPU 4000.0 MEM 80000.0 Slots 40 ] Total [ CPU 4000.0 MEM 80000.0 Slots 40 ]
+    Cluster Overall Avail [ CPU 12200.0 MEM 410000.0 Slots 200 ] Total [ CPU 12200.0 MEM 410000.0 Slots 200 ]
+```
+It is clear that rack-0 is the best rack since it is the most balanced and can potentially schedule the most executors, while rack-2 is the worst rack since rack-2 is depleted of CPU resources, rendering it unschedulable even though it has other resources available.
+
+We first calculate the resource availability percentage of all the racks for each resource by computing:
+
+    (resource available on rack) / (resource available in cluster)
+
+We do this calculation to normalize the values, otherwise the resource values would not be comparable.
+
+So for our example:
+```
+    rack-3 Avail [ CPU 0.819672131147541% MEM 48.78048780487805% Slots 20.0% ] effective resources: 0.00819672131147541
+    rack-2 Avail [ CPU 0.0% MEM 19.51219512195122% Slots 20.0% ] effective resources: 0.0
+    rack-4 Avail [ CPU 50.0% MEM 2.4390243902439024% Slots 20.0% ] effective resources: 0.024390243902439025
+    rack-1 Avail [ CPU 16.39344262295082% MEM 9.75609756097561% Slots 20.0% ] effective resources: 0.0975609756097561
+    rack-0 Avail [ CPU 32.78688524590164% MEM 19.51219512195122% Slots 20.0% ] effective resources: 0.1951219512195122
+```
+The effective resource of a rack, which is also the subordinate resource, is computed by:
+
+    MIN(resource availability percentage of {CPU, Memory, # of free Slots}).
+
+Then we order the racks by the effective resource.
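+
+A compact sketch of this computation (hypothetical helper; percentages expressed as fractions):
+```
+    // Effective (subordinate) resource: the minimum availability fraction across
+    // CPU, memory, and slots, each normalized by the enclosing total (the cluster, for racks).
+    double effectiveResource(double cpu, double mem, double slots,
+                             double cpuTotal, double memTotal, double slotsTotal) {
+        double cpuPct   = cpuTotal   == 0 ? 0 : cpu   / cpuTotal;
+        double memPct   = memTotal   == 0 ? 0 : mem   / memTotal;
+        double slotsPct = slotsTotal == 0 ? 0 : slots / slotsTotal;
+        return Math.min(cpuPct, Math.min(memPct, slotsPct));
+    }
+    // e.g. rack-0: min(4000/12200, 80000/410000, 40/200) = min(0.328, 0.195, 0.2) = 0.195
+```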
+
+Thus for our example:
+
+    Sorted racks: [rack-0, rack-1, rack-4, rack-3, rack-2]
+
+This metric is used in sorting both nodes and racks. When sorting racks, we consider the resources available on the rack and in the whole cluster (containing all racks). When sorting nodes, we consider the resources available on the node and the resources available in the rack (the sum of all resources available for all nodes in the rack)
+
+Original Jira for this enhancement: [STORM-1766](https://issues.apache.org/jira/browse/STORM-1766)
+
+### Improvements in Scheduling
+This section provides some experimental results on the performance benefits of the enhancements on top of the original scheduling strategy. The experiments are based on running simulations using:
+
+https://github.com/jerrypeng/storm-scheduler-test-framework
+
+Random topologies and clusters are used in the simulation, as well as a comprehensive dataset consisting of all real topologies running in all the storm clusters at Yahoo.
+
+The graphs below provide a comparison of how well the various strategies schedule topologies to minimize network latency. A network metric is calculated for each scheduling of a topology by each scheduling strategy. The network metric is based on how many connections each executor in a topology has to make to another executor residing in the same worker (JVM process), in a different worker on the same host, on a different host, and on a different rack. The assumptions we are making are the following:
+
+1. Intra-worker communication is the fastest
+2. Inter-worker communication is fast
+3. Inter-node communication is slower
+4. Inter-rack communication is the slowest
+
+For this network metric, the larger the number, the more potential network latency the topology will have for this scheduling. Two types of experiments are performed. One set of experiments is performed with randomly generated topologies and randomly generated clusters. The other set of experiments is performed with a dataset containing all of the running topologies at Yahoo and semi-randomly generated clusters based on the size of the topology. Both sets of experiments are run for millions of iterations until results converge.
+
+For the experiments involving randomly generated topologies, an optimal strategy is implemented that exhaustively finds the optimal solution if a solution exists. The topologies and clusters used in this experiment are relatively small so that the optimal strategy can traverse the solution space to find an optimal solution in a reasonable amount of time. This strategy is not run with the Yahoo topologies since the topologies are large and would take an unreasonable amount of time to run, since the solution space is W^N (ordering doesn't matter within a worker) where W is the number of workers and N is the number of executors. The NextGenStrategy represents the scheduling strategy with these enhancements. The DefaultResourceAwareStrategy represents the original scheduling strategy. The RoundRobinStrategy represents a naive strategy that simply schedules executors in a round robin fashion while respecting the resource constraints. The graph below presents averages of the network metric. A CDF graph is also presented further down.
+
+| Random Topologies | Yahoo topologies |
+|-------------------|------------------|
+|![](images/ras_new_strategy_network_metric_random.png)| ![](images/ras_new_strategy_network_metric_yahoo_topologies.png)|
+
+The next graph displays how close the schedulings from the respective scheduling strategies are to the schedulings of the optimal strategy. As explained earlier, this is only done for the randomly generated topologies and clusters.
+
+| Random Topologies |
+|-------------------|
+|![](images/ras_new_strategy_network_metric_improvement_random.png)|
+
+The graph below is a CDF of the network metric:
+
+| Random Topologies | Yahoo topologies |
+|-------------------|------------------|
+|![](images/ras_new_strategy_network_cdf_random.png)| ![](images/ras_new_strategy_network_metric_cdf_yahoo_topologies.png)|
+
+Below is a comparison of how long the strategies take to run:
+
+| Random Topologies | Yahoo topologies |
+|-------------------|------------------|
+|![](images/ras_new_strategy_runtime_random.png)| ![](images/ras_new_strategy_runtime_yahoo.png)|
+
diff --git a/docs/Running-topologies-on-a-production-cluster.md b/docs/Running-topologies-on-a-production-cluster.md
new file mode 100644
index 00000000000..b92f3b25fc6
--- /dev/null
+++ b/docs/Running-topologies-on-a-production-cluster.md
@@ -0,0 +1,77 @@
+---
+title: Running Topologies on a Production Cluster
+layout: documentation
+documentation: true
+---
+Running topologies on a production cluster is similar to running in [Local mode](Local-mode.html). Here are the steps:
+
+1) Define the topology (use [TopologyBuilder](javadocs/org/apache/storm/topology/TopologyBuilder.html) if defining it in Java)
+
+2) Use [StormSubmitter](javadocs/org/apache/storm/StormSubmitter.html) to submit the topology to the cluster. `StormSubmitter` takes as input the name of the topology, a configuration for the topology, and the topology itself. For example:
+
+```java
+Config conf = new Config();
+conf.setNumWorkers(20);
+conf.setMaxSpoutPending(5000);
+StormSubmitter.submitTopology("mytopology", conf, topology);
+```
+
+3) Create a JAR containing your topology code. You have the option to either bundle all of the dependencies of your code into that JAR (except for Storm -- the Storm JARs will be added to the classpath on the worker nodes), or you can leverage the [Classpath handling](Classpath-handling.html) features in Storm for using external libraries without bundling them into your topology JAR.
+
+If you're using Maven, the [Maven Assembly Plugin](http://maven.apache.org/plugins/maven-assembly-plugin/) can do the packaging for you. Just add this to your pom.xml:
+
+```xml
+<plugin>
+  <artifactId>maven-assembly-plugin</artifactId>
+  <configuration>
+    <descriptorRefs>
+      <descriptorRef>jar-with-dependencies</descriptorRef>
+    </descriptorRefs>
+    <archive>
+      <manifest>
+        <mainClass>com.path.to.main.Class</mainClass>
+      </manifest>
+    </archive>
+  </configuration>
+</plugin>
+```
+Then run `mvn assembly:assembly` to get an appropriately packaged jar. Make sure you [exclude](http://maven.apache.org/plugins/maven-assembly-plugin/examples/single/including-and-excluding-artifacts.html) the Storm jars since the cluster already has Storm on the classpath.
+
+4) Submit the topology to the cluster using the `storm` client, specifying the path to your jar, the classname to run, and any arguments it will use:
+
+`storm jar path/to/allmycode.jar org.me.MyTopology arg1 arg2 arg3`
+
+`storm jar` will submit the jar to the cluster and configure the `StormSubmitter` class to talk to the right cluster. In this example, after uploading the jar `storm jar` calls the main function on `org.me.MyTopology` with the arguments "arg1", "arg2", and "arg3".
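+
+For reference, a minimal sketch of what such a main class might look like (the topology wiring is elided, and the name-from-first-argument convention is just an assumption of this sketch):
+
+```java
+package org.me;
+
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.topology.TopologyBuilder;
+
+public class MyTopology {
+    public static void main(String[] args) throws Exception {
+        // `storm jar` passes "arg1", "arg2", "arg3" straight through to args.
+        TopologyBuilder builder = new TopologyBuilder();
+        // ... declare spouts and bolts on the builder here ...
+
+        Config conf = new Config();
+        conf.setNumWorkers(20);
+
+        // Use the first argument as the topology name (an assumption for this sketch).
+        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
+    }
+}
+```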
+
+You can find out how to configure your `storm` client to talk to a Storm cluster on [Setting up development environment](Setting-up-development-environment.html).
+
+### Common configurations
+
+There are a variety of configurations you can set per topology. A list of all the configurations you can set can be found [here](javadocs/org/apache/storm/Config.html). The ones prefixed with "TOPOLOGY" can be overridden on a topology-specific basis (the other ones are cluster configurations and cannot be overridden). Here are some common ones that are set for a topology:
+
+1. **Config.TOPOLOGY_WORKERS**: This sets the number of worker processes to use to execute the topology. For example, if you set this to 25, there will be 25 Java processes across the cluster executing all the tasks. If you had a combined parallelism of 150 across all components in the topology, each worker process will have 6 tasks running within it as threads.
+2. **Config.TOPOLOGY_ACKER_EXECUTORS**: This sets the number of executors that will track tuple trees and detect when a spout tuple has been fully processed. Ackers are an integral part of Storm's reliability model and you can read more about them on [Guaranteeing message processing](Guaranteeing-message-processing.html). If you leave this variable unset or set it to null, Storm will set the number of acker executors to be equal to the number of workers configured for this topology. If this variable is set to 0, then Storm will immediately ack tuples as soon as they come off the spout, effectively disabling reliability.
+3. **Config.TOPOLOGY_MAX_SPOUT_PENDING**: This sets the maximum number of spout tuples that can be pending on a single spout task at once (pending means the tuple has not been acked or failed yet). It is highly recommended you set this config to prevent queue explosion.
+4. **Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS**: This is the maximum amount of time a spout tuple has to be fully completed before it is considered failed. This value defaults to 30 seconds, which is sufficient for most topologies. See [Guaranteeing message processing](Guaranteeing-message-processing.html) for more information on how Storm's reliability model works.
+5. **Config.TOPOLOGY_SERIALIZATIONS**: You can register more serializers with Storm using this config so that you can use custom types within tuples.
+
+
+### Killing a topology
+
+To kill a topology, simply run:
+
+`storm kill {stormname}`
+
+Give `storm kill` the same name you used when submitting the topology.
+
+Storm won't kill the topology immediately. Instead, it deactivates all the spouts so that they don't emit any more tuples, and then waits Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS seconds before destroying all the workers. This gives the topology enough time to complete any tuples it was processing when it was killed.
+
+### Updating a running topology
+
+To update a running topology, the only option currently is to kill the current topology and resubmit a new one. A planned feature is to implement a `storm swap` command that swaps a running topology with a new one, ensuring minimal downtime and no chance of both topologies processing tuples at the same time.
+
+### Monitoring topologies
+
+The best place to monitor a topology is the Storm UI. The Storm UI provides information about errors happening in tasks and fine-grained stats on the throughput and latency performance of each component of each running topology.
+
+You can also look at the worker logs on the cluster machines.
diff --git a/docs/SECURITY.md b/docs/SECURITY.md
new file mode 100644
index 00000000000..aacbff7ded4
--- /dev/null
+++ b/docs/SECURITY.md
@@ -0,0 +1,684 @@
+---
+title: Running Apache Storm Securely
+layout: documentation
+documentation: true
+---
+
+# Running Apache Storm Securely
+
+Apache Storm offers a range of configuration options when trying to secure
+your cluster. By default all authentication and authorization is disabled but
+can be turned on as needed.
+
+## Firewall/OS level Security
+
+You can still have a secure storm cluster without turning on formal
+authentication and authorization, but doing so usually requires
+configuring your Operating System to restrict the operations that can be done.
+This is generally a good idea even if you plan on running your cluster with Auth.
+
+Storm's OS level security relies on running Storm processes using OS accounts that have only the permissions they need.
+Note that workers run under the same OS account as the Supervisor daemon by default.
+
+The exact detail of how to set up these precautions varies a lot and is beyond
+the scope of this document.
+
+It is generally a good idea to enable a firewall and restrict incoming network
+connections to only those originating from the cluster itself and from trusted
+hosts and services; a complete list of the ports storm uses is below.
+
+If the data your cluster is processing is sensitive it might be best to set up
+IPsec to encrypt all traffic being sent between the hosts in the cluster.
+
+### Ports
+
+| Default Port | Storm Config | Client Hosts/Processes | Server |
+|--------------|--------------|------------------------|--------|
+| 2181 | `storm.zookeeper.port` | Nimbus, Supervisors, and Worker processes | Zookeeper |
+| 6627 | `nimbus.thrift.port` | Storm clients, Supervisors, and UI | Nimbus |
+| 6628 | `supervisor.thrift.port` | Nimbus | Supervisors |
+| 8080 | `ui.port` | Client Web Browsers | UI |
+| 8000 | `logviewer.port` | Client Web Browsers | Logviewer |
+| 3772 | `drpc.port` | External DRPC Clients | DRPC |
+| 3773 | `drpc.invocations.port` | Worker Processes | DRPC |
+| 3774 | `drpc.http.port` | External HTTP DRPC Clients | DRPC |
+| 670{0,1,2,3} | `supervisor.slots.ports` | Worker Processes | Worker Processes |
+
+
+### UI/Logviewer
+
+The UI and logviewer processes provide a way to not only see what a cluster is
+doing, but also manipulate running topologies. In general these processes should
+not be exposed except to users of the cluster.
+
+Some form of authentication is typically required, and can be done using a java servlet filter:
+
+```yaml
+ui.filter: "filter.class"
+ui.filter.params: "param1":"value1"
+logviewer.filter: "filter.class"
+logviewer.filter.params: "param1":"value1"
+```
+
+The `ui.filter` is an instance of `javax.servlet.Filter` that is intended to
+filter all incoming requests to the UI and authenticate each request, mapping
+it to a "user". Typically this is done by modifying or wrapping the
+`HttpServletRequest` to return the user principal through the
+`getUserPrincipal()` method or the user name through the
+`getRemoteUser()` method. If your filter authenticates in a different way you
+can look at setting `ui.http.creds.plugin` to point to an instance of `IHttpCredentialsPlugin`
+that can take the `HttpServletRequest`, return a user name, and populate the needed fields
+in the current `ReqContext`. These are advanced features and you may want to look at the
+`DefaultHttpCredentialsPlugin` as an example of how to do this.
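+
+For illustration, a bare-bones filter might look like the following sketch (`ExampleUiFilter` and the `X-Authenticated-User` header are made-up stand-ins for a real authentication mechanism):
+
+```java
+import java.io.IOException;
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletRequestWrapper;
+
+public class ExampleUiFilter implements Filter {
+    @Override
+    public void init(FilterConfig config) throws ServletException {}
+
+    @Override
+    public void doFilter(ServletRequest req, ServletResponse resp, FilterChain chain)
+            throws IOException, ServletException {
+        HttpServletRequest httpReq = (HttpServletRequest) req;
+        // Hypothetical authentication step: derive a user name from the request.
+        final String user = httpReq.getHeader("X-Authenticated-User");
+        // Wrap the request so getRemoteUser() reports the authenticated user.
+        chain.doFilter(new HttpServletRequestWrapper(httpReq) {
+            @Override
+            public String getRemoteUser() {
+                return user;
+            }
+        }, resp);
+    }
+
+    @Override
+    public void destroy() {}
+}
+```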
+
+These same settings apply to the logviewer too. If you want separate control
+over how authentication works in the logviewer you may optionally set `logviewer.filter`
+instead and it will override any `ui.filter` settings for the logviewer process.
+
+If the cluster is single tenant you might want to just restrict access to the UI/logviewer
+ports to only accept connections from local hosts, and then front them with
+another web server, like Apache httpd, that can authenticate/authorize incoming connections and
+proxy the connection to the storm process. To make this work the ui process must have
+logviewer.port set to the port of the proxy in its storm.yaml, while the logviewers
+must have it set to the actual port that they are going to bind to.
+
+The servlet filters are preferred because they allow individual topologies to
+specify who is and who is not allowed to access the pages associated with
+them.
+
+Storm UI (or logviewer) can be configured to use AuthenticationFilter from hadoop-auth.
+```yaml
+ui.filter: "org.apache.hadoop.security.authentication.server.AuthenticationFilter"
+ui.filter.params:
+  "type": "kerberos"
+  "kerberos.principal": "HTTP/nimbus.witzend.com"
+  "kerberos.keytab": "/vagrant/keytabs/http.keytab"
+  "kerberos.name.rules": "RULE:[2:$1@$0]([jt]t@.*EXAMPLE.COM)s/.*/$MAPRED_USER/ RULE:[2:$1@$0]([nd]n@.*EXAMPLE.COM)s/.*/$HDFS_USER/DEFAULT"
+```
+Make sure to create a principal 'HTTP/{hostname}' (here hostname should be the one where the UI daemon runs).
+Be aware that the UI user *MUST* be HTTP.
+
+Once configured, users need to run kinit before accessing the UI. For example:
+
+`curl -i --negotiate -u:anyUser -b ~/cookiejar.txt -c ~/cookiejar.txt http://storm-ui-hostname:8080/api/v1/cluster/summary`
+
+1. Firefox: Go to about:config, search for network.negotiate-auth.trusted-uris, and double-click to add the value "http://storm-ui-hostname:8080/"
+2. Google Chrome: start from the command line with: google-chrome --auth-server-whitelist="*storm-ui-hostname" --auth-negotiate-delegate-whitelist="*storm-ui-hostname"
+3. IE: Configure trusted websites to include "storm-ui-hostname" and allow negotiation for that website
+
+**Note**: For viewing any logs via `logviewer` in secure mode, all the hosts that run `logviewer` should also be added to the above white list. For big clusters you could white list the hosts' domain (e.g., set `network.negotiate-auth.trusted-uris` to `.yourdomain.com`).
+
+**Caution**: In an AD/MIT Kerberos setup the key size is bigger than the default UI jetty server request header size. Make sure you set ui.header.buffer.bytes to 65536 in storm.yaml. More details are on [STORM-633](https://issues.apache.org/jira/browse/STORM-633)
+
+
+## DRPC HTTP
+
+The DRPC server optionally supports a REST endpoint as well, and you can configure authentication
+on that endpoint similarly to the ui/logviewer.
+
+The `drpc.http.filter` and `drpc.http.filter.params` configs can be used to set up a `Filter` for the DRPC server. Unlike the logviewer
+it does not fall back to the UI configs, as the DRPC server is intended to be REST only and will often be hit by headless users.
+
+The `drpc.http.creds.plugin` config can be used in cases where the default plugin is not good enough because of how authentication happens.
+
+
+## UI / DRPC / LOGVIEWER SSL
+
+The UI, DRPC, and Logviewer daemons all allow users to configure SSL.
+
+### UI
+
+For the UI, users need to set the following configs in storm.yaml. Generating keystores with proper keys and certs should be taken care of by the user before this step.
+
+1. ui.https.port
+2. ui.https.keystore.type (example "jks")
+3. ui.https.keystore.path (example "/etc/ssl/storm_keystore.jks")
+4. ui.https.keystore.password (keystore password)
+5. ui.https.key.password (private key password)
+
+Optional configs:
+
+6. ui.https.truststore.path (example "/etc/ssl/storm_truststore.jks")
+7. ui.https.truststore.password (truststore password)
+8. ui.https.truststore.type (example "jks")
+
+If users want to set up 2-way auth:
+
+9. ui.https.want.client.auth (If this is set to true the server requests client certificate authentication, but keeps the connection open even if no certificate is provided)
+10. ui.https.need.client.auth (If this is set to true the server requires the client to provide authentication)
+
+
+### DRPC
+
+Similarly to the UI, users need to configure the following for DRPC:
+
+1. drpc.https.port
+2. drpc.https.keystore.type (example "jks")
+3. drpc.https.keystore.path (example "/etc/ssl/storm_keystore.jks")
+4. drpc.https.keystore.password (keystore password)
+5. drpc.https.key.password (private key password)
+
+Optional configs:
+
+6. drpc.https.truststore.path (example "/etc/ssl/storm_truststore.jks")
+7. drpc.https.truststore.password (truststore password)
+8. drpc.https.truststore.type (example "jks")
+
+If users want to set up 2-way auth:
+
+9. drpc.https.want.client.auth (If this is set to true the server requests client certificate authentication, but keeps the connection open even if no certificate is provided)
+10. drpc.https.need.client.auth (If this is set to true the server requires the client to provide authentication)
+
+
+### LOGVIEWER
+
+Similarly to the UI and DRPC, users need to configure the following for the Logviewer:
+
+1. logviewer.https.port
+2. logviewer.https.keystore.type (example "jks")
+3. logviewer.https.keystore.path (example "/etc/ssl/storm_keystore.jks")
+4. logviewer.https.keystore.password (keystore password)
+5. logviewer.https.key.password (private key password)
+
+Optional configs:
+
+6. logviewer.https.truststore.path (example "/etc/ssl/storm_truststore.jks")
+7. logviewer.https.truststore.password (truststore password)
+8. logviewer.https.truststore.type (example "jks")
+
+If users want to set up 2-way auth:
+
+9. logviewer.https.want.client.auth (If this is set to true the server requests client certificate authentication, but keeps the connection open even if no certificate is provided)
+10. logviewer.https.need.client.auth (If this is set to true the server requires the client to provide authentication)
+
+## Mutual TLS (mTLS) Support
+
+**Available since Storm 2.7.0 (STORM-4070)**
+
+Storm now supports mutual TLS (mTLS) for internal Thrift RPC communication among Nimbus, Supervisors, and workers. Unlike one-way TLS, mTLS requires both parties to present and verify each other's certificates, ensuring full two-way certificate authentication and encryption.
+
+### Example TLS Configuration
+### 1. Nimbus Settings
+
+```yaml
+# Thrift TLS Listener
+nimbus.thrift.tls.port: 6067
+nimbus.thrift.access.log.enabled: true
+nimbus.thrift.tls.server.only: true
+
+# Server-side certificates & truststore
+nimbus.thrift.tls.server.keystore.path: /etc/ssl/server.keystore.jks
+nimbus.thrift.tls.server.keystore.password: password
+nimbus.thrift.tls.server.truststore.path: /etc/ssl/server.truststore.jks
+nimbus.thrift.tls.server.truststore.password: password
+
+# Client-side certificates & transport plugin
+nimbus.thrift.client.use.tls: true
+nimbus.thrift.tls.client.keystore.path: /etc/ssl/client.keystore.jks
+nimbus.thrift.tls.client.keystore.password: password
+nimbus.thrift.tls.client.truststore.path: /etc/ssl/client.truststore.jks
+nimbus.thrift.tls.client.truststore.password: password
+nimbus.thrift.tls.transport: org.apache.storm.security.auth.tls.TlsTransportPlugin
+```
+### 2. Supervisor Settings
+
+```yaml
+# TLS transport plugin & client enable
+supervisor.thrift.transport: org.apache.storm.security.auth.tls.TlsTransportPlugin
+supervisor.thrift.client.use.tls: true
+
+# Supervisor as Thrift TLS server
+supervisor.thrift.tls.server.keystore.path: /etc/ssl/server.keystore.jks
+supervisor.thrift.tls.server.keystore.password: password
+supervisor.thrift.tls.server.truststore.path: /etc/ssl/server.truststore.jks
+supervisor.thrift.tls.server.truststore.password: password
+
+# Supervisor client settings
+supervisor.thrift.tls.client.keystore.path: /etc/ssl/client.keystore.jks
+supervisor.thrift.tls.client.keystore.password: password
+supervisor.thrift.tls.client.truststore.path: /etc/ssl/client.truststore.jks
+supervisor.thrift.tls.client.truststore.password: password
+```
+
+### 3. Worker Settings
+
+```yaml
+# Storm Netty messaging TLS (worker ↔ worker)
+storm.messaging.netty.tls.enable: true
+storm.messaging.netty.tls.require.open.ssl: true
+
+# Inbound (server-side) credentials
+storm.messaging.netty.tls.keystore.path: /etc/ssl/server.keystore.jks
+storm.messaging.netty.tls.keystore.password: password
+storm.messaging.netty.tls.truststore.path: /etc/ssl/server.truststore.jks
+storm.messaging.netty.tls.truststore.password: password
+
+# Outbound (client-side) credentials
+storm.messaging.netty.tls.client.keystore.path: /etc/ssl/client.keystore.jks
+storm.messaging.netty.tls.client.keystore.password: password
+storm.messaging.netty.tls.client.truststore.path: /etc/ssl/client.truststore.jks
+storm.messaging.netty.tls.client.truststore.password: password
+```
+### 4. Setting Descriptions
+
+| Setting | Description |
+|---------|-------------|
+| `nimbus.thrift.tls.port` | Port on which Nimbus listens for TLS-encrypted Thrift connections (e.g., 6067) |
+| `nimbus.thrift.tls.server.only` | Nimbus accepts only secure TLS connections |
+| `nimbus.thrift.tls.server.keystore.path` | Path to Nimbus server keystore |
+| `nimbus.thrift.tls.server.keystore.password` | Password for the Nimbus server keystore |
+| `nimbus.thrift.tls.server.truststore.path` | Path to Nimbus server truststore |
+| `nimbus.thrift.tls.server.truststore.password` | Password for the Nimbus truststore |
+| `nimbus.thrift.client.use.tls` | Enable TLS on Nimbus outbound Thrift calls |
+| `nimbus.thrift.tls.client.keystore.path` | Path to Nimbus client keystore (for outbound connections) |
+| `nimbus.thrift.tls.client.keystore.password` | Password for the Nimbus client keystore |
+| `nimbus.thrift.tls.client.truststore.path` | Path to Nimbus client truststore |
+| `nimbus.thrift.tls.client.truststore.password` | Password for the Nimbus client truststore |
+| `nimbus.thrift.tls.transport` | TLS transport plugin class for Nimbus |
+| `storm.principal.tolocal` | Principal-to-local mapping class (for X.509 auth) |
+| `supervisor.thrift.transport` | TLS transport plugin class for Supervisor Thrift |
+| `supervisor.thrift.client.use.tls` | Enable TLS for Supervisor outbound Thrift calls |
+| `supervisor.thrift.tls.server.keystore.path` | Path to Supervisor server keystore |
+| `supervisor.thrift.tls.server.keystore.password` | Password for the Supervisor server keystore |
+| `supervisor.thrift.tls.server.truststore.path` | Path to Supervisor server truststore |
+| `supervisor.thrift.tls.server.truststore.password` | Password for the Supervisor truststore |
+| `supervisor.thrift.tls.client.keystore.path` | Path to Supervisor client keystore |
+| `supervisor.thrift.tls.client.keystore.password` | Password for the Supervisor client keystore |
+| `supervisor.thrift.tls.client.truststore.path` | Path to Supervisor client truststore |
+| `supervisor.thrift.tls.client.truststore.password` | Password for the Supervisor client truststore |
+| `storm.messaging.netty.tls.enable` | Enable TLS for Storm Netty messaging (inter-worker) |
+| `storm.messaging.netty.tls.require.open.ssl` | Require OpenSSL provider for Netty TLS |
+| `storm.messaging.netty.tls.keystore.path` | Path to Netty server keystore |
+| `storm.messaging.netty.tls.keystore.password` | Password for the Netty server keystore |
+| `storm.messaging.netty.tls.truststore.path` | Path to Netty server truststore |
+| `storm.messaging.netty.tls.truststore.password` | Password for the Netty server truststore |
+| `storm.messaging.netty.tls.client.keystore.path` | Path to Netty client keystore |
+| `storm.messaging.netty.tls.client.keystore.password` | Password for the Netty client keystore |
+| `storm.messaging.netty.tls.client.truststore.path` | Path to Netty client truststore |
+| `storm.messaging.netty.tls.client.truststore.password` | Password for the Netty client truststore |
+
+
+## Authentication (Kerberos)
+
+Storm offers pluggable authentication support through thrift and SASL. This
+example covers only Kerberos, as it is a common setup for most big data
+projects.
+
+Setting up a KDC and configuring kerberos on each node is beyond the scope of
+this document; it is assumed that you have done that already.
+
+### Create Headless Principals and keytabs
+
+Each Zookeeper Server, Nimbus, and DRPC server will need a service principal, which, by convention, includes the FQDN of the host it will run on. Be aware that the zookeeper user *MUST* be zookeeper.
+The supervisors and UI also need a principal to run as, but because they make outgoing connections they do not need to be service principals.
+The following is an example of how to set up kerberos principals, but the
+details may vary depending on your KDC and OS.
+
+
+```bash
+# Zookeeper (Will need one of these for each box in the Zk ensemble)
+sudo kadmin.local -q 'addprinc zookeeper/zk1.example.com@STORM.EXAMPLE.COM'
+sudo kadmin.local -q "ktadd -k /tmp/zk.keytab zookeeper/zk1.example.com@STORM.EXAMPLE.COM"
+# Nimbus and DRPC
+sudo kadmin.local -q 'addprinc storm/storm.example.com@STORM.EXAMPLE.COM'
+sudo kadmin.local -q "ktadd -k /tmp/storm.keytab storm/storm.example.com@STORM.EXAMPLE.COM"
+# All UI logviewer and Supervisors
+sudo kadmin.local -q 'addprinc storm@STORM.EXAMPLE.COM'
+sudo kadmin.local -q "ktadd -k /tmp/storm.keytab storm@STORM.EXAMPLE.COM"
+```
+
+Be sure to distribute the keytab(s) to the appropriate boxes and set the filesystem permissions so that only the headless user running ZK or storm has access to them.
+
+#### Storm Kerberos Configuration
+
+Both storm and Zookeeper use jaas configuration files to log the user in.
+Each jaas file may have multiple sections for different interfaces being used.
+
+To enable Kerberos authentication in storm you need to set the following storm.yaml configs:
+```yaml
+storm.thrift.transport: "org.apache.storm.security.auth.kerberos.KerberosSaslTransportPlugin"
+java.security.auth.login.config: "/path/to/jaas.conf"
+```
+
+Nimbus and the supervisor processes also connect to ZooKeeper (ZK), and we want to configure them to use Kerberos for authentication with ZK. To do this append
+```
+-Djava.security.auth.login.config=/path/to/jaas.conf
+```
+
+to the childopts of nimbus, ui, and supervisor. Here is an example given the default childopts settings at the time of writing:
+
+```yaml
+nimbus.childopts: "-Xmx1024m -Djava.security.auth.login.config=/path/to/jaas.conf"
+ui.childopts: "-Xmx768m -Djava.security.auth.login.config=/path/to/jaas.conf"
+supervisor.childopts: "-Xmx256m -Djava.security.auth.login.config=/path/to/jaas.conf"
+```
+
+The jaas.conf file should look something like the following for the storm nodes.
+The StormServer section is used by nimbus and the DRPC nodes. It does not need to be included on supervisor nodes.
+The StormClient section is used by all storm clients that want to talk to nimbus, including the ui, logviewer, and supervisor. We will use this section on the gateways as well, but the structure there will be a bit different.
+The Client section is used by processes wanting to talk to zookeeper and really only needs to be included with nimbus and the supervisors.
+The Server section is used by the zookeeper servers.
+Having unused sections in the jaas file is not a problem.
+
+```
+StormServer {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="$keytab"
+   storeKey=true
+   useTicketCache=false
+   principal="$principal";
+};
+StormClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="$keytab"
+   storeKey=true
+   useTicketCache=false
+   serviceName="$nimbus_user"
+   principal="$principal";
+};
+Client {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="$keytab"
+   storeKey=true
+   useTicketCache=false
+   serviceName="zookeeper"
+   principal="$principal";
+};
+Server {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="$keytab"
+   storeKey=true
+   useTicketCache=false
+   principal="$principal";
+};
+```
+
+The following is an example based on the keytabs generated above:
+```
+StormServer {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="/keytabs/storm.keytab"
+   storeKey=true
+   useTicketCache=false
+   principal="storm/storm.example.com@STORM.EXAMPLE.COM";
+};
+StormClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="/keytabs/storm.keytab"
+   storeKey=true
+   useTicketCache=false
+   serviceName="storm"
+   principal="storm@STORM.EXAMPLE.COM";
+};
+Client {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="/keytabs/storm.keytab"
+   storeKey=true
+   useTicketCache=false
+   serviceName="zookeeper"
+   principal="storm@STORM.EXAMPLE.COM";
+};
+Server {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="/keytabs/zk.keytab"
+   storeKey=true
+   useTicketCache=false
+   serviceName="zookeeper"
+   principal="zookeeper/zk1.example.com@STORM.EXAMPLE.COM";
+};
+```
+
+Nimbus will also translate the principal into a local user name, so that other services can use this name. To configure this for Kerberos authentication set
+
+```
+storm.principal.tolocal: "org.apache.storm.security.auth.KerberosPrincipalToLocal"
+```
+
+This only needs to be done on nimbus, but it will not hurt on any node.
+We also need to inform the topology who the supervisor daemon and the nimbus daemon are running as, from a ZooKeeper perspective.
+
+```
+storm.zookeeper.superACL: "sasl:${nimbus-user}"
+```
+
+Here *nimbus-user* is the Kerberos user that nimbus uses to authenticate with ZooKeeper. If ZooKeeper is stripping host and realm then this needs to have host and realm stripped too.
+
+#### ZooKeeper Ensemble
+
+Complete details of how to set up a secure ZK are beyond the scope of this document. But in general you want to enable SASL authentication on each server, and optionally strip off host and realm:
+
+```
+authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider
+kerberos.removeHostFromPrincipal = true
+kerberos.removeRealmFromPrincipal = true
+```
+
+And you want to include the jaas.conf on the command line when launching the server so it can find the keytab:
+```
+-Djava.security.auth.login.config=/jaas/zk_jaas.conf
+```
+
+#### Gateways
+
+Ideally the end user will only need to run kinit before interacting with storm. To make this happen seamlessly we need the default jaas.conf on the gateways to be something like
+
+```
+StormClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   doNotPrompt=false
+   useTicketCache=true
+   serviceName="$nimbus_user";
+};
+```
+
+The end user can override this if they have a headless user that has a keytab.
+
+### Authorization Setup
+
+*Authentication* does the job of verifying who the user is, but we also need *authorization* to do the job of enforcing what each user can do.
+
+The preferred authorization plug-in for nimbus is the *SimpleACLAuthorizer*. To use the *SimpleACLAuthorizer*, set the following:
+
+```yaml
+nimbus.authorizer: "org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer"
+```
+
+DRPC has a separate authorizer configuration; do not use SimpleACLAuthorizer for DRPC.
+
+The *SimpleACLAuthorizer* plug-in needs to know who the supervisor users are, and it needs to know about all of the administrator users, including the user running the ui daemon.
+
+These are set through *nimbus.supervisor.users* and *nimbus.admins* respectively. Each can either be a full Kerberos principal name, or the name of the user with host and realm stripped off.
+
+The log servers have their own authorization configurations. These are set through *logs.users* and *logs.groups*. These should be set to the admin users or groups for all of the nodes in the cluster.
+
+When a topology is submitted, the submitting user can specify users in this list as well. The users and groups specified, in addition to the users in the cluster-wide setting, will be granted access to the submitted topology's worker logs in the logviewers.
+
+### Supervisor headless User and group Setup
+
+To ensure isolation of users in multi-tenancy, supervisors need to run workers under a headless user and group unique to the supervisor nodes. To enable this, follow the steps below:
+1. Add the headless user to all supervisor hosts.
+2. Create a unique group and make it the primary group for the headless user on the supervisor nodes.
+3. Set the following properties on storm for these supervisor nodes.
+
+### Multi-tenant Scheduler
+
+To support multi-tenancy better we have written a new scheduler. To enable this scheduler set:
+```yaml
+storm.scheduler: "org.apache.storm.scheduler.multitenant.MultitenantScheduler"
+```
+Be aware that many of the features of this scheduler rely on storm authentication. Without it the scheduler will not know who the user is and will not isolate topologies properly.
+
+The goal of the multi-tenant scheduler is to provide a way to isolate topologies from one another, but also to limit the resources that an individual user can have in the cluster.
+
+The scheduler currently has one config that can be set either through `storm.yaml` or through a separate config file called `multitenant-scheduler.yaml` that should be placed in the same directory as `storm.yaml`. It is preferable to use `multitenant-scheduler.yaml` because it can be updated without needing to restart nimbus.
+
+There is currently only one config in `multitenant-scheduler.yaml`: `multitenant.scheduler.user.pools` is a map from user name to the maximum number of nodes that user is guaranteed to be able to use for their topologies.
+
+For example:
+
+```yaml
+multitenant.scheduler.user.pools:
+    "evans": 10
+    "derek": 10
+```
+
+### Run worker processes as the user who submitted the topology
+By default storm runs workers as the user that is running the supervisor. This is not ideal for security. To make storm run topologies as the user that launched them, set:
+
+```yaml
+supervisor.run.worker.as.user: true
+```
+
+There are several files that go along with this that need to be configured properly to make storm secure.
+
+The worker-launcher executable is a special program that allows the supervisor to launch workers as different users. For this to work it needs to be owned by root, but with the group set to a group that only the supervisor headless user is a part of.
+It also needs to have 6550 permissions.
+There is also a worker-launcher.cfg file, usually located under `/etc/storm`, that should look something like the following:
+
+```
+storm.worker-launcher.group=$(worker_launcher_group)
+min.user.id=$(min_user_id)
+```
+where worker_launcher_group is the same group the supervisor is a part of, and min.user.id is set to the first real user id on the system.
+This config file also needs to be owned by root and must not have world or group write permissions.
+
+### Impersonating a user
+A storm client may submit requests on behalf of another user. For example, if `userX` submits an oozie workflow, and as part of the workflow execution the user `oozie` wants to submit a topology on behalf of `userX`, it can do so by leveraging the impersonation feature. In order to submit a topology as some other user, you can use the `StormSubmitter.submitTopologyAs` API. Alternatively you can use `NimbusClient.getConfiguredClientAs`
+to get a nimbus client as some other user and perform any nimbus action (i.e. kill/rebalance/activate/deactivate) using this client.
+
+Impersonation authorization is disabled by default, which means any user can perform impersonation. To ensure only authorized users can perform impersonation you should start nimbus with `nimbus.impersonation.authorizer` set to `org.apache.storm.security.auth.authorizer.ImpersonationAuthorizer`.
+The `ImpersonationAuthorizer` uses `nimbus.impersonation.acl` as the acl to authorize users. Following is a sample nimbus config for supporting impersonation:
+
+```yaml
+nimbus.impersonation.authorizer: org.apache.storm.security.auth.authorizer.ImpersonationAuthorizer
+nimbus.impersonation.acl:
+    impersonating_user1:
+        hosts:
+            [comma separated list of hosts from which impersonating_user1 is allowed to impersonate other users]
+        groups:
+            [comma separated list of groups whose users impersonating_user1 is allowed to impersonate]
+    impersonating_user2:
+        hosts:
+            [comma separated list of hosts from which impersonating_user2 is allowed to impersonate other users]
+        groups:
+            [comma separated list of groups whose users impersonating_user2 is allowed to impersonate]
+```
+
+To support the oozie use case the following config can be supplied:
+```yaml
+nimbus.impersonation.acl:
+    oozie:
+        hosts:
+            [oozie-host1, oozie-host2, 127.0.0.1]
+        groups:
+            [some-group-that-userX-is-part-of]
+```
+
+### Automatic Credentials Push and Renewal
+Individual topologies have the ability to push credentials (tickets and tokens) to workers so that they can access secure services. Exposing this to all of the users can be a pain for them.
+To hide this from them in the common case, plugins can be used to populate the credentials, unpack them on the other side into a java Subject, and also allow Nimbus to renew the credentials if needed. These are controlled by the following configs.
+
+`topology.auto-credentials` is a list of java plugins, all of which must implement the `IAutoCredentials` interface, that populate the credentials on the gateway side and unpack them on the worker side; a skeletal sketch of such a plugin is shown below.
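+
+Purely as an illustration of the interface shape (this is a hypothetical plugin, assuming the Storm 2.x `IAutoCredentials` signatures; `ExampleAutoCreds`, the credential key, and the token-fetching helper are all made up):
+
+```java
+import java.util.Map;
+import javax.security.auth.Subject;
+import org.apache.storm.security.auth.IAutoCredentials;
+
+// Hypothetical auto-credentials plugin (illustration only, not shipped with Storm).
+public class ExampleAutoCreds implements IAutoCredentials {
+    private static final String KEY = "example.token"; // made-up credential key
+
+    @Override
+    public void prepare(Map<String, Object> conf) {
+        // Read any plugin-specific settings from the configuration.
+    }
+
+    @Override
+    public void populateCredentials(Map<String, String> credentials) {
+        // Gateway side: serialize a ticket/token into the credentials map.
+        credentials.put(KEY, fetchTokenSomehow());
+    }
+
+    @Override
+    public void populateSubject(Subject subject, Map<String, String> credentials) {
+        // Worker side: unpack the token and attach it to the java Subject.
+        subject.getPrivateCredentials().add(credentials.get(KEY));
+    }
+
+    @Override
+    public void updateSubject(Subject subject, Map<String, String> credentials) {
+        // Called when renewed credentials are pushed to a running worker.
+        populateSubject(subject, credentials);
+    }
+
+    private String fetchTokenSomehow() {
+        return "token"; // placeholder for real token acquisition
+    }
+}
+```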
+On a kerberos secure cluster `topology.auto-credentials` should be set by default to point to `org.apache.storm.security.auth.kerberos.AutoTGT`.
+
+`nimbus.credential.renewers.classes` should also be set to `org.apache.storm.security.auth.kerberos.AutoTGT` so that nimbus can periodically renew the TGT on behalf of the user.
+
+Autocredential classes may also implement the IMetricsRegistrant interface to register metrics automatically for each topology. The AutoTGT class currently implements this interface and adds a metric named TGT-TimeToExpiryMsecs showing the remaining time until the TGT needs to be renewed.
+
+`nimbus.credential.renewers.freq.secs` controls how often the renewer will poll to see if anything needs to be renewed, but the default should be fine.
+
+In addition, Nimbus itself can be used to get credentials on behalf of the user submitting topologies. This can be configured using `nimbus.autocredential.plugins.classes`, which is a list
+of fully qualified class names, all of which must implement `INimbusCredentialPlugin`. Nimbus will invoke the populateCredentials method of all the configured implementations as part of topology
+submission. You should use this config with `topology.auto-credentials` and `nimbus.credential.renewers.classes` so the credentials can be populated on the worker side and nimbus can automatically renew
+them. Currently there are 2 examples of using this config, AutoHDFS and AutoHBase, which automatically populate HDFS and HBase delegation tokens for the topology submitter so keytabs don't have to be distributed
+to all possible worker hosts.
+
+### Limits
+By default storm allows a topology of any size to be submitted. But ZK and others have limitations on how big a topology can actually be. The following configs allow you to limit the maximum size of a topology.
+
+| YAML Setting | Description |
+|------------|----------------------|
+| nimbus.slots.perTopology | The maximum number of slots/workers a topology can use. |
+| nimbus.executors.perTopology | The maximum number of executors/threads a topology can use. |
+
+### Log Cleanup
+The Logviewer daemon is now also responsible for cleaning up old log files for dead topologies.
+
+| YAML Setting | Description |
+|--------------|-------------------------------------|
+| logviewer.cleanup.age.mins | How old (by last modification time) must a worker's log be before that log is considered for clean-up. (Living workers' logs are never cleaned up by the logviewer: their logs are rolled via logback.) |
+| logviewer.cleanup.interval.secs | Interval of time in seconds at which the logviewer cleans up worker logs. |
+
+
+### Allowing specific users or groups to access storm
+
+With SimpleACLAuthorizer any user with a valid kerberos ticket can deploy a topology or perform further operations such as activate, deactivate, or access cluster information.
+One can restrict this access by specifying nimbus.users or nimbus.groups. If nimbus.users is configured, only the users in the list can deploy a topology or access the cluster.
+Similarly, nimbus.groups restricts storm cluster access to users who belong to those groups.
+
+To configure this, specify the following config in storm.yaml:
+
+```yaml
+nimbus.users:
+  - "testuser"
+```
+
+or
+
+```yaml
+nimbus.groups:
+  - "storm"
+```
+
+
+### DRPC
+
+Storm provides an Access Control List authorizer for DRPC. See [org.apache.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer](javadocs/org/apache/storm/security/auth/authorizer/DRPCSimpleACLAuthorizer.html) for more details.
+
+There are several DRPC ACL related configurations.
+
+| YAML Setting | Description |
+|------------|----------------------|
+| drpc.authorizer | A class that will perform authorization for DRPC operations. Set this to org.apache.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer when using security.|
+| drpc.authorizer.acl.filename | This is the name of a file that the ACLs will be loaded from. It is separate from storm.yaml to allow the file to be updated without bringing down a DRPC server. Defaults to drpc-auth-acl.yaml |
+| drpc.authorizer.acl.strict| It is useful to set this to false for staging, where users may want to experiment, but true for production, where you want users to be secure. Defaults to false. |
+
+The file pointed to by drpc.authorizer.acl.filename will have only one config in it, drpc.authorizer.acl, which should be of the form:
+
+```yaml
+drpc.authorizer.acl:
+  "functionName1":
+    "client.users":
+      - "alice"
+      - "bob"
+    "invocation.user": "bob"
+```
+
+In this example the users bob and alice, as client.users, are allowed to run DRPC requests against functionName1, but only bob, as the invocation.user, is allowed to run the topology that actually processes those requests.
+
+
+## Cluster Zookeeper Authentication
+
+Users can implement cluster Zookeeper authentication by setting the configurations shown below.
+
+| YAML Setting | Description |
+|------------|----------------------|
+| storm.zookeeper.auth.scheme | The cluster Zookeeper authentication scheme to use, e.g. "digest". Defaults to no authentication. |
+| storm.zookeeper.auth.payload | A string representing the payload for cluster Zookeeper authentication. It should only be set in storm-cluster-auth.yaml. Users can see storm-cluster-auth.yaml.example for more details. |
+
+
+Also, there are several configurations for topology Zookeeper authentication:
+
+| YAML Setting | Description |
+|------------|----------------------|
+| storm.zookeeper.topology.auth.scheme | The topology Zookeeper authentication scheme to use, e.g. "digest". It is an internal config and users shouldn't set it. |
+| storm.zookeeper.topology.auth.payload | A string representing the payload for topology Zookeeper authentication. |
+
+Note: If storm.zookeeper.topology.auth.payload isn't set, Storm will generate a ZooKeeper secret payload for MD5-digest with the generateZookeeperDigestSecretPayload() method.
diff --git a/docs/STORM-UI-REST-API.md b/docs/STORM-UI-REST-API.md
new file mode 100644
index 00000000000..b4f1d9b1fb1
--- /dev/null
+++ b/docs/STORM-UI-REST-API.md
@@ -0,0 +1,1616 @@
+---
+title: Storm UI REST API
+layout: documentation
+documentation: true
+---
+
+
+The Storm UI daemon provides a REST API that allows you to interact with a Storm cluster, which includes retrieving
+metrics data and configuration information as well as management operations such as starting or stopping topologies.
+
+
+# Data format
+
+The REST API returns JSON responses and supports JSONP.
+Clients can pass a callback query parameter to wrap the JSON in a callback function.
+
+
+# Using the UI REST API
+
+_Note: It is recommended to ignore undocumented elements in the JSON response because future versions of Storm may not_
+_support those elements anymore._
+
+
+## REST API Base URL
+
+The REST API is part of the UI daemon of Storm (started by `storm ui`) and thus runs on the same host and port as the
+Storm UI (the UI daemon is often run on the same host as the Nimbus daemon).
The port is configured by `ui.port`,
+which is set to `8080` by default (see [defaults.yaml](conf/defaults.yaml)).
+
+The API base URL would thus be:
+
+    http://<ui-host>:<ui-port>/api/v1/...
+
+You can use a tool such as `curl` to talk to the REST API:
+
+    # Request the cluster configuration.
+    # Note: We assume ui.port is configured to the default value of 8080.
+    $ curl http://<ui-host>:8080/api/v1/cluster/configuration
+
+## Impersonating a user in a secure environment
+
+In a secure environment an authenticated user can impersonate another user. To impersonate a user the caller must pass
+a `doAsUser` param or header with its value set to the user that the request needs to be performed as. Please see SECURITY.md
+to learn more about how to set up impersonation ACLs and authorization. The REST API uses the same configs and acls that
+are used by nimbus.
+
+Examples:
+
+```no-highlight
+ 1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1425844354\?doAsUser=testUSer1
+ 2. curl 'http://localhost:8080/api/v1/topology/wordcount-1-1425844354/activate' -X POST -H 'doAsUser:testUSer1'
+```
+
+## GET Operations
+
+### /api/v1/cluster/configuration (GET)
+
+Returns the cluster configuration.
+
+Sample response (does not include all the data fields):
+
+```json
+  {
+    "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
+    "topology.tick.tuple.freq.secs": null,
+    "topology.builtin.metrics.bucket.size.secs": 60,
+    "topology.fall.back.on.java.serialization": false,
+    "topology.max.error.report.per.interval": 5,
+    "zmq.linger.millis": 5000,
+    "topology.skip.missing.kryo.registrations": false,
+    "storm.messaging.netty.client_worker_threads": 1,
+    "ui.childopts": "-Xmx768m",
+    "storm.zookeeper.session.timeout": 20000,
+    "nimbus.reassign": true,
+    "topology.trident.batch.emit.interval.millis": 500,
+    "storm.messaging.netty.flush.check.interval.ms": 10,
+    "nimbus.monitor.freq.secs": 10,
+    "logviewer.childopts": "-Xmx128m",
+    "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib",
+    "topology.executor.send.buffer.size": 1024
+  }
+```
+
+### /api/v1/cluster/summary (GET)
+
+Returns cluster summary information such as nimbus uptime or number of supervisors.
+ +Response fields: + +|Field |Value|Description +|--- |--- |--- +|stormVersion|String| Storm version| +|supervisors|Integer| Number of supervisors running| +|topologies| Integer| Number of topologies running| +|slotsTotal| Integer|Total number of available worker slots| +|slotsUsed| Integer| Number of worker slots used| +|slotsFree| Integer |Number of worker slots available| +|executorsTotal| Integer |Total number of executors| +|tasksTotal| Integer |Total tasks| +|schedulerDisplayResource| Boolean | Whether to display scheduler resource information| +|totalMem| Double | The total amount of memory in the cluster in MB| +|totalCpu| Double | The total amount of CPU in the cluster| +|availMem| Double | The amount of available memory in the cluster in MB| +|availCpu| Double | The amount of available cpu in the cluster| +|memAssignedPercentUtil| Double | The percent utilization of assigned memory resources in cluster| +|cpuAssignedPercentUtil| Double | The percent utilization of assigned CPU resources in cluster| + +Sample response: + +```json + { + "stormVersion": "0.9.2-incubating-SNAPSHOT", + "supervisors": 1, + "slotsTotal": 4, + "slotsUsed": 3, + "slotsFree": 1, + "executorsTotal": 28, + "tasksTotal": 28, + "schedulerDisplayResource": true, + "totalMem": 4096.0, + "totalCpu": 400.0, + "availMem": 1024.0, + "availCPU": 250.0, + "memAssignedPercentUtil": 75.0, + "cpuAssignedPercentUtil": 37.5 + } +``` + +### /api/v1/supervisor/summary (GET) + +Returns summary information for all supervisors. + +Response fields: + +|Field |Value|Description| +|--- |--- |--- +|id| String | Supervisor's id| +|host| String| Supervisor's host name| +|uptime| String| Shows how long the supervisor is running| +|uptimeSeconds| Integer| Shows how long the supervisor is running in seconds| +|slotsTotal| Integer| Total number of available worker slots for this supervisor| +|slotsUsed| Integer| Number of worker slots used on this supervisor| +|schedulerDisplayResource| Boolean | Whether to display scheduler resource information| +|totalMem| Double| Total memory capacity on this supervisor| +|totalCpu| Double| Total CPU capacity on this supervisor| +|usedMem| Double| Used memory capacity on this supervisor| +|usedCpu| Double| Used CPU capacity on this supervisor| + +Sample response: + +```json +{ + "supervisors": [ + { + "id": "0b879808-2a26-442b-8f7d-23101e0c3696", + "host": "10.11.1.7", + "uptime": "5m 58s", + "uptimeSeconds": 358, + "slotsTotal": 4, + "slotsUsed": 3, + "totalMem": 3000, + "totalCpu": 400, + "usedMem": 1280, + "usedCPU": 160 + } + ], + "schedulerDisplayResource": true +} +``` + +### /api/v1/nimbus/summary (GET) + +Returns summary information for all nimbus hosts. 
+
+Response fields:
+
+|Field |Value|Description|
+|--- |--- |---
+|host| String | Nimbus' host name|
+|port| int| Nimbus' port number|
+|status| String| Possible values are Leader, Not a Leader, Dead|
+|nimbusUpTime| String| Shows how long the nimbus has been running|
+|nimbusUpTimeSeconds| String| Shows how long the nimbus has been running in seconds|
+|nimbusLogLink| String| Logviewer url to view the nimbus.log|
+|version| String| Version of storm this nimbus host is running|
+
+Sample response:
+
+```json
+{
+    "nimbuses":[
+        {
+            "host":"192.168.202.1",
+            "port":6627,
+            "nimbusLogLink":"http:\/\/192.168.202.1:8000\/log?file=nimbus.log",
+            "status":"Leader",
+            "version":"0.10.0-SNAPSHOT",
+            "nimbusUpTime":"3m 33s",
+            "nimbusUpTimeSeconds":"213"
+        }
+    ]
+}
+```
+
+### /api/v1/history/summary (GET)
+
+Returns a list of all running topologies' IDs submitted by the current user.
+
+Response fields:
+
+|Field |Value | Description|
+|--- |--- |---
+|topo-history| List| List of Topologies' IDs|
+
+Sample response:
+
+```json
+{
+    "topo-history":[
+        "wc6-1-1446571009",
+        "wc8-2-1446587178"
+    ]
+}
+```
+
+### /api/v1/supervisor (GET)
+
+Returns summary for a supervisor by id, or all supervisors running on a host.
+
+Examples:
+
+```no-highlight
+ 1. By host: http://ui-daemon-host-name:8080/api/v1/supervisor?host=supervisor-daemon-host-name
+ 2. By id: http://ui-daemon-host-name:8080/api/v1/supervisor?id=f5449110-1daa-43e2-89e3-69917b16dec9-192.168.1.1
+```
+
+Request parameters:
+
+|Parameter |Value |Description |
+|----------|--------|-------------|
+|id |String. Supervisor id | If specified, responds with the supervisor and worker stats for that id. Note that when id is specified, the host argument is ignored. |
+|host |String. Host name| If specified, responds with all supervisors and worker stats on that host (normally just one)|
+|sys |String. Values 1 or 0.
Default value 0| Controls including sys stats part of the response| + +Response fields: + +|Field |Value|Description| +|--- |--- |--- +|supervisors| Array| Array of supervisor summaries| +|workers| Array| Array of worker summaries | +|schedulerDisplayResource| Boolean | Whether to display scheduler resource information| + +Each supervisor is defined by: + +|Field |Value|Description| +|--- |--- |--- +|id| String | Supervisor's id| +|host| String| Supervisor's host name| +|uptime| String| Shows how long the supervisor is running| +|uptimeSeconds| Integer| Shows how long the supervisor is running in seconds| +|slotsTotal| Integer| Total number of worker slots for this supervisor| +|slotsUsed| Integer| Number of worker slots used on this supervisor| +|schedulerDisplayResource| Boolean | Whether to display scheduler resource information| +|totalMem| Double| Total memory capacity on this supervisor| +|totalCpu| Double| Total CPU capacity on this supervisor| +|usedMem| Double| Used memory capacity on this supervisor| +|usedCpu| Double| Used CPU capacity on this supervisor| + +Each worker is defined by: + +|Field |Value |Description| +|-------|-------|-----------| +|supervisorId | String| Supervisor's id| +|host | String | Worker's host name| +|port | Integer | Worker's port| +|topologyId | String | Topology Id| +|topologyName | String | Topology Name| +|executorsTotal | Integer | Number of executors used by the topology in this worker| +|assignedMemOnHeap | Double | Assigned On-Heap Memory by Scheduler (MB)| +|assignedMemOffHeap | Double | Assigned Off-Heap Memory by Scheduler (MB)| +|assignedCpu | Number | Assigned CPU by Scheduler (%)| +|componentNumTasks | Dictionary | Components -> # of executing tasks| +|uptime| String| Shows how long the worker is running| +|uptimeSeconds| Integer| Shows how long the worker is running in seconds| +|workerLogLink | String | Link to worker log viewer page| + +Sample response: + +```json +{ + "supervisors": [{ + "totalMem": 4096.0, + "host":"192.168.10.237", + "id":"bdfe8eff-f1d8-4bce-81f5-9d3ae1bf432e", + "uptime":"7m 8s", + "totalCpu":400.0, + "usedCpu":495.0, + "usedMem":3432.0, + "slotsUsed":2, + "version":"0.10.1", + "slotsTotal":4, + "uptimeSeconds":428 + }], + "schedulerDisplayResource":true, + "workers":[{ + "topologyName":"ras", + "topologyId":"ras-4-1460229987", + "host":"192.168.10.237", + "supervisorId":"bdfe8eff-f1d8-4bce-81f5-9d3ae1bf432e", + "assignedMemOnHeap":704.0, + "uptime":"2m 47s", + "uptimeSeconds":167, + "port":6707, + "workerLogLink":"http:\/\/host:8000\/log?file=ras-4-1460229987%2F6707%2Fworker.log", + "componentNumTasks": { + "word":5 + }, + "executorsTotal":8, + "assignedCpu":130.0, + "assignedMemOffHeap":80.0 + }, + { + "topologyName":"ras", + "topologyId":"ras-4-1460229987", + "host":"192.168.10.237", + "supervisorId":"bdfe8eff-f1d8-4bce-81f5-9d3ae1bf432e", + "assignedMemOnHeap":904.0, + "uptime":"2m 53s", + "port":6706, + "workerLogLink":"http:\/\/host:8000\/log?file=ras-4-1460229987%2F6706%2Fworker.log", + "componentNumTasks":{ + "exclaim2":2, + "exclaim1":3, + "word":5 + }, + "executorsTotal":10, + "uptimeSeconds":173, + "assignedCpu":165.0, + "assignedMemOffHeap":80.0 + }] +} +``` + +### /api/v1/owner-resources (GET) + +Returns summary information aggregated at the topology owner level. Pass an optional id for a specific owner (e.g. 
/api/v1/owner-resources/theowner) + +Response fields: + +|Field |Value|Description| +|--- |--- |--- +|owner |String |Topology owner +|totalTopologies|Integer|Total number of topologies owner is running +|totalExecutors |Integer|Total number of executors used by owner +|totalWorkers |Integer|Total number of workers used by owner +|totalTasks|Integer|Total number of tasks used by owner +|totalMemoryUsage|Double|Total Memory Assigned on behalf of owner in MB +|totalCpuUsage|Double|Total CPU Resource Assigned on behalf of User. Every 100 means 1 core +|memoryGuarantee|Double|The amount of memory resource (in MB) guaranteed to owner +|cpuGuarantee|Double|The amount of CPU resource (every 100 means 1 core) guaranteed to owner +|isolatedNodes|Integer|The amount of nodes that are guaranteed isolated to owner +|memoryGuaranteeRemaining|Double|The amount of guaranteed memory resources (in MB) remaining +|cpuGuaranteeRemaining|Double|The amount of guaranteed CPU resource (every 100 means 1 core) remaining +|totalReqOnHeapMem|Double| Total On-Heap Memory Requested by User in MB +|totalReqOffHeapMem|Double|Total Off-Heap Memory Requested by User in MB +|totalReqMem|Double|Total Memory Requested by User in MB +|totalReqCpu|Double|Total CPU Resource Requested by User. Every 100 means 1 core +|totalAssignedOnHeapMem|Double|Total On-Heap Memory Assigned on behalf of owner in MB +|totalAssignedOffHeapMem|Double|Total Off-Heap Memory Assigned on behalf of owner in MB + +Sample response: + +```json +{ + "owners": [ + { + "totalReqOnHeapMem": 896, + "owner": "ownerA", + "totalExecutors": 7, + "cpuGuaranteeRemaining": 30, + "totalReqMem": 896, + "cpuGuarantee": 100, + "isolatedNodes": "N/A", + "memoryGuarantee": 4000, + "memoryGuaranteeRemaining": 3104, + "totalTasks": 7, + "totalMemoryUsage": 896, + "totalReqOffHeapMem": 0, + "totalReqCpu": 70, + "totalWorkers": 2, + "totalCpuUsage": 70, + "totalAssignedOffHeapMem": 0, + "totalAssignedOnHeapMem": 896, + "totalTopologies": 1 + } + ], + "schedulerDisplayResource": true +} +``` + +### /api/v1/topology/summary (GET) + +Returns summary information for all topologies. 
+
+Response fields:
+
+|Field |Value | Description|
+|--- |--- |---
+|id| String| Topology Id|
+|name| String| Topology Name|
+|status| String| Topology Status|
+|uptime| String| Shows how long the topology is running|
+|uptimeSeconds| Integer| Shows how long the topology is running in seconds|
+|tasksTotal| Integer |Total number of tasks for this topology|
+|workersTotal| Integer |Number of workers used for this topology|
+|executorsTotal| Integer |Number of executors used for this topology|
+|replicationCount| Integer |Number of nimbus hosts on which this topology code is replicated|
+|requestedMemOnHeap| Double|Requested On-Heap Memory by User (MB)|
+|requestedMemOffHeap| Double|Requested Off-Heap Memory by User (MB)|
+|requestedTotalMem| Double|Requested Total Memory by User (MB)|
+|requestedCpu| Double|Requested CPU by User (%)|
+|assignedMemOnHeap| Double|Assigned On-Heap Memory by Scheduler (MB)|
+|assignedMemOffHeap| Double|Assigned Off-Heap Memory by Scheduler (MB)|
+|assignedTotalMem| Double|Assigned Total Memory by Scheduler (MB)|
+|assignedCpu| Double|Assigned CPU by Scheduler (%)|
+|schedulerDisplayResource| Boolean | Whether to display scheduler resource information|
+
+Sample response:
+
+```json
+{
+    "topologies": [
+        {
+            "id": "WordCount3-1-1402960825",
+            "name": "WordCount3",
+            "status": "ACTIVE",
+            "uptime": "6m 5s",
+            "uptimeSeconds": 365,
+            "tasksTotal": 28,
+            "workersTotal": 3,
+            "executorsTotal": 28,
+            "replicationCount": 1,
+            "requestedMemOnHeap": 640,
+            "requestedMemOffHeap": 128,
+            "requestedTotalMem": 768,
+            "requestedCpu": 80,
+            "assignedMemOnHeap": 640,
+            "assignedMemOffHeap": 128,
+            "assignedTotalMem": 768,
+            "assignedCpu": 80
+        }
+    ],
+    "schedulerDisplayResource": true
+}
+```
+
+### /api/v1/topology-workers/\<id\> (GET)
+
+Returns the workers' information (host and port) for a topology whose id is substituted for \<id\>.
+The topology id is obtained from the /topology/summary call above.
+
+Response fields:
+
+|Field |Value | Description|
+|--- |--- |---
+|hostPortList| List| Workers' information for a topology|
+|logviewerPort| Integer| Logviewer Port|
+
+Sample response:
+
+```json
+{
+    "hostPortList":[
+        {
+            "host":"192.168.202.2",
+            "port":6701
+        },
+        {
+            "host":"192.168.202.2",
+            "port":6702
+        },
+        {
+            "host":"192.168.202.3",
+            "port":6700
+        }
+    ],
+    "logviewerPort":8000
+}
+```
+
+### /api/v1/topology/\<id\> (GET)
+
+Returns topology information and statistics. Substitute \<id\> with the topology id.
+
+Request parameters:
+
+|Parameter |Value |Description |
+|----------|--------|-------------|
+|id |String (required)| Topology Id |
+|window |String. Default value :all-time| Window duration for metrics in seconds|
+|sys |String. Values 1 or 0. Default value 0| Controls including sys stats part of the response|
+
+
+Response fields:
+
+|Field |Value |Description|
+|--- |--- |---
+|id| String| Topology Id|
+|name| String |Topology Name|
+|uptime| String |How long the topology has been running|
+|uptimeSeconds| Integer |How long the topology has been running in seconds|
+|status| String |Current status of the topology, e.g. "ACTIVE"|
+|tasksTotal| Integer |Total number of tasks for this topology|
+|workersTotal| Integer |Number of workers used for this topology|
+|executorsTotal| Integer |Number of executors used for this topology|
+|msgTimeout| Integer | Number of seconds a tuple has before the spout considers it failed |
+|windowHint| String | window param value in "hh mm ss" format.
+
+### /api/v1/topology/\<id\> (GET)
+
+Returns topology information and statistics. Substitute \<id\> with the topology id.
+
+Request parameters:
+
+|Parameter |Value |Description |
+|----------|--------|-------------|
+|id |String (required)| Topology Id |
+|window |String. Default value :all-time| Window duration for metrics in seconds|
+|sys |String. Values 1 or 0. Default value 0| Controls including sys stats part of the response|
+
+
+Response fields:
+
+|Field |Value |Description|
+|--- |--- |---
+|id| String| Topology Id|
+|name| String |Topology Name|
+|uptime| String |How long the topology has been running|
+|uptimeSeconds| Integer |How long the topology has been running, in seconds|
+|status| String |Current status of the topology, e.g. "ACTIVE"|
+|tasksTotal| Integer |Total number of tasks for this topology|
+|workersTotal| Integer |Number of workers used for this topology|
+|executorsTotal| Integer |Number of executors used for this topology|
+|msgTimeout| Integer | Number of seconds a tuple has before the spout considers it failed |
+|windowHint| String | Window param value in "hh mm ss" format. Default value is "All Time"|
+|schedulerDisplayResource| Boolean | Whether to display scheduler resource information|
+|replicationCount| Integer |Number of nimbus hosts on which this topology code is replicated|
+|debug| Boolean | Whether debug is enabled for the topology|
+|samplingPct| Double| Controls downsampling of events before they are sent to the event log (percentage)|
+|assignedMemOnHeap| Double|Assigned On-Heap Memory by Scheduler (MB)|
+|assignedMemOffHeap| Double|Assigned Off-Heap Memory by Scheduler (MB)|
+|assignedTotalMem| Double|Assigned Off-Heap + On-Heap Memory by Scheduler (MB)|
+|assignedCpu| Double|Assigned CPU by Scheduler (%)|
+|requestedMemOnHeap| Double|Requested On-Heap Memory by User (MB)|
+|requestedMemOffHeap| Double|Requested Off-Heap Memory by User (MB)|
+|requestedCpu| Double|Requested CPU by User (%)|
+|topologyStats| Array | Array of all the topology related stats per time window|
+|topologyStats.windowPretty| String |Duration passed in HH:MM:SS format|
+|topologyStats.window| String |User requested time window for metrics|
+|topologyStats.emitted| Long |Number of messages emitted in given window|
+|topologyStats.transferred| Long |Number of messages transferred in given window|
+|topologyStats.completeLatency| String (double value returned in String format) |Total latency for processing the message|
+|topologyStats.acked| Long |Number of messages acked in given window|
+|topologyStats.failed| Long |Number of messages failed in given window|
+|workers| Array | Array of workers in topology|
+|workers.supervisorId | String| Supervisor's id|
+|workers.host | String | Worker's host name|
+|workers.port | Integer | Worker's port|
+|workers.topologyId | String | Topology Id|
+|workers.topologyName | String | Topology Name|
+|workers.executorsTotal | Integer | Number of executors used by the topology in this worker|
+|workers.assignedMemOnHeap | Double | Assigned On-Heap Memory by Scheduler (MB)|
+|workers.assignedMemOffHeap | Double | Assigned Off-Heap Memory by Scheduler (MB)|
+|workers.assignedCpu | Number | Assigned CPU by Scheduler (%)|
+|workers.componentNumTasks | Dictionary | Components -> # of executing tasks|
+|workers.uptime| String| Shows how long the worker has been running|
+|workers.uptimeSeconds| Integer| Shows how long the worker has been running, in seconds|
+|workers.workerLogLink | String | Link to worker log viewer page|
+|spouts| Array | Array of all the spout components in the topology|
+|spouts.spoutId| String |Spout id|
+|spouts.executors| Integer |Number of executors for the spout|
+|spouts.emitted| Long |Number of messages emitted in given window |
+|spouts.completeLatency| String (double value returned in String format) |Total latency for processing the message|
+|spouts.transferred| Long |Total number of messages transferred in given window|
+|spouts.tasks| Integer |Total number of tasks for the spout|
+|spouts.lastError| String |Shows the last error that occurred in the spout|
+|spouts.errorHost| String | Worker hostname the last error was reported on|
+|spouts.errorPort| String | Worker port the last error was reported on|
+|spouts.errorTime| Integer | Unix timestamp the last error was reported (seconds since epoch) |
+|spouts.errorLapsedSecs| Integer | Number of seconds elapsed since the last error occurred in the spout|
+|spouts.errorWorkerLogLink| String | Link to the worker log that reported the exception |
+|spouts.acked| Long |Number of messages acked|
+|spouts.failed| Long |Number of messages failed|
+|spouts.requestedMemOnHeap| Double|Requested On-Heap Memory by User (MB)|
+|spouts.requestedMemOffHeap| Double|Requested Off-Heap Memory by User (MB)|
+|spouts.requestedCpu| Double|Requested CPU by User (%)|
+|bolts| Array | Array of bolt components in the topology|
+|bolts.boltId| String |Bolt id|
+|bolts.capacity| String (double value returned in String format) |This value indicates the number of messages executed * average execute latency / time window|
+|bolts.processLatency| String (double value returned in String format) |Average time for the bolt to ack a message after it was received|
+|bolts.executeLatency| String (double value returned in String format) |Average time to run the execute method of the bolt|
+|bolts.executors| Integer |Number of executor tasks in the bolt component|
+|bolts.tasks| Integer |Number of instances of the bolt|
+|bolts.acked| Long |Number of tuples acked by the bolt|
+|bolts.failed| Long |Number of tuples failed by the bolt|
+|bolts.lastError| String |Shows the last error that occurred in the bolt|
+|bolts.errorHost| String | Worker hostname the last error was reported on|
+|bolts.errorPort| String | Worker port the last error was reported on|
+|bolts.errorTime| Integer | Unix timestamp the last error was reported (seconds since epoch) |
+|bolts.errorLapsedSecs| Integer |Number of seconds elapsed since the last error occurred in the bolt|
+|bolts.errorWorkerLogLink| String | Link to the worker log that reported the exception |
+|bolts.emitted| Long |Number of tuples emitted|
+|bolts.requestedMemOnHeap| Double|Requested On-Heap Memory by User (MB)|
+|bolts.requestedMemOffHeap| Double|Requested Off-Heap Memory by User (MB)|
+|bolts.requestedCpu| Double|Requested CPU by User (%)|
+
+Examples:
+
+```no-highlight
+ 1. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825
+ 2. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825?sys=1
+ 3. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825?window=600
+```
+
+Sample response:
+
+```json
+{
+    "name": "WordCount3",
+    "id": "WordCount3-1-1402960825",
+    "workersTotal": 3,
+    "window": "600",
+    "status": "ACTIVE",
+    "tasksTotal": 28,
+    "executorsTotal": 28,
+    "uptime": "29m 19s",
+    "uptimeSeconds": 1759,
+    "msgTimeout": 30,
+    "windowHint": "10m 0s",
+    "schedulerDisplayResource": true,
+    "workers": [
+        {
+            "topologyName": "WordCount3",
+            "topologyId": "WordCount3-1-1402960825",
+            "host": "my-host",
+            "supervisorId": "9124ca9a-42e8-481e-9bf3-a041d9595430",
+            "assignedMemOnHeap": 1452.0,
+            "uptime": "27m 26s",
+            "port": 6702,
+            "workerLogLink": "logs",
+            "componentNumTasks": {
+                "spout": 2,
+                "count": 3,
+                "split": 10
+            },
+            "executorsTotal": 15,
+            "uptimeSeconds": 1646,
+            "assignedCpu": 260.0,
+            "assignedMemOffHeap": 160.0
+        },
+        {
+            "topologyName": "WordCount3",
+            "topologyId": "WordCount3-1-1402960825",
+            "host": "192.168.10.237",
+            "supervisorId": "bdfe8eff-f1d8-4bce-81f5-9d3ae1bf432e-169.254.129.212",
+            "uptime": "2m 47s",
+            "uptimeSeconds": 167,
+            "port": 6707,
+            "workerLogLink": "http:\/\/192.168.10.237:8000\/log?file=WordCount3-1-1402960825%2F6707%2Fworker.log",
+            "componentNumTasks": {
+                "spout": 5
+            },
+            "executorsTotal": 8,
+            "assignedMemOnHeap": 704.0,
+            "assignedCpu": 130.0,
+            "assignedMemOffHeap": 80.0
+        }
+    ],
+    "topologyStats": [
+        {
+            "windowPretty": "10m 0s",
+            "window": "600",
+            "emitted": 397960,
+            "transferred": 213380,
+            "completeLatency": "0.000",
+            "acked": 213460,
+            "failed": 0
+        },
+        {
+            "windowPretty": "3h 0m 0s",
+            "window": "10800",
+            "emitted": 1190260,
+            "transferred": 638260,
+            "completeLatency": "0.000",
+            "acked": 638280,
+            "failed": 0
+        },
+        {
+            "windowPretty": "1d 0h 0m 0s",
+            "window": "86400",
+            "emitted": 1190260,
+            "transferred": 638260,
+            "completeLatency": "0.000",
+            "acked": 638280,
+            "failed": 0
+        },
+        {
+            "windowPretty": "All time",
+            "window": ":all-time",
+            "emitted": 1190260,
+            "transferred": 638260,
+            "completeLatency": "0.000",
+            "acked": 638280,
+            "failed": 0
+        }
+    ],
+    "spouts": [
+        {
+            "executors": 5,
+            "emitted": 28880,
+            "completeLatency": "0.000",
+            "transferred": 28880,
+            "acked": 0,
+            "spoutId": "spout",
+            "tasks": 5,
+            "lastError": "",
+            "errorHost": "",
+            "errorPort": null,
+            "errorWorkerLogLink": "",
+            "errorTime": null,
+            "errorLapsedSecs": null,
+            "failed": 0
+        }
+    ],
+    "bolts": [
+        {
+            "executors": 12,
+            "emitted": 184580,
+            "transferred": 0,
+            "acked": 184640,
+            "executeLatency": "0.048",
+            "tasks": 12,
+            "executed": 184620,
+            "processLatency": "0.043",
+            "boltId": "count",
+            "lastError": "",
+            "errorHost": "",
+            "errorPort": null,
+            "errorWorkerLogLink": "",
+            "errorTime": null,
+            "errorLapsedSecs": null,
+            "capacity": "0.003",
+            "failed": 0
+        },
+        {
+            "executors": 8,
+            "emitted": 184500,
+            "transferred": 184500,
+            "acked": 28820,
+            "executeLatency": "0.024",
+            "tasks": 8,
+            "executed": 28780,
+            "processLatency": "2.112",
+            "boltId": "split",
+            "lastError": "java.lang.RuntimeException: Error here! at org.apache.storm.starter.bolt.WordCountBolt.nextTuple(WordCountBolt.java:50) at org.apache.storm.executor.bolt.BoltExecutor$2.call",
+            "errorHost": "192.168.10.237",
+            "errorPort": 6707,
+            "errorWorkerLogLink": "http://192.168.10.237:8000/api/v1/log?file=WordCount3-1-1402960825%2F6707%2Fworker.log",
+            "errorTime": 1597626060,
+            "errorLapsedSecs": 65,
+            "capacity": "0.000",
+            "failed": 0
+        }
+    ],
+    "configuration": {
+        "storm.id": "WordCount3-1-1402960825",
+        "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
+        "topology.tick.tuple.freq.secs": null,
+        "topology.builtin.metrics.bucket.size.secs": 60,
+        "topology.fall.back.on.java.serialization": false,
+        "topology.max.error.report.per.interval": 5,
+        "zmq.linger.millis": 5000,
+        "topology.skip.missing.kryo.registrations": false,
+        "storm.messaging.netty.client_worker_threads": 1,
+        "ui.childopts": "-Xmx768m",
+        "storm.zookeeper.session.timeout": 20000,
+        "nimbus.reassign": true,
+        "topology.trident.batch.emit.interval.millis": 500,
+        "storm.messaging.netty.flush.check.interval.ms": 10,
+        "nimbus.monitor.freq.secs": 10,
+        "logviewer.childopts": "-Xmx128m",
+        "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib",
+        "topology.executor.send.buffer.size": 1024,
+        "storm.local.dir": "storm-local",
+        "storm.messaging.netty.buffer_size": 5242880,
+        "supervisor.worker.start.timeout.secs": 120,
+        "topology.enable.message.timeouts": true,
+        "nimbus.cleanup.inbox.freq.secs": 600,
+        "nimbus.inbox.jar.expiration.secs": 3600,
+        "drpc.worker.threads": 64,
+        "topology.worker.shared.thread.pool.size": 4,
+        "nimbus.seeds": [
+            "hw10843.local"
+        ],
+        "storm.messaging.netty.min_wait_ms": 100,
+        "storm.zookeeper.port": 2181,
+        "transactional.zookeeper.port": null,
+        "topology.executor.receive.buffer.size": 1024,
+        "transactional.zookeeper.servers": null,
+        "storm.zookeeper.root": "/storm",
+        "storm.zookeeper.retry.intervalceiling.millis": 30000,
+        "supervisor.enable": true,
+        "storm.messaging.netty.server_worker_threads": 1
+    },
+    "replicationCount": 1
+}
+```
+
+### /api/v1/topology/\<id\>/metrics (GET)
+
+Returns detailed metrics for a topology whose id is substituted for \<id\>. It shows metrics per component, aggregated by stream.
+
+|Parameter |Value |Description |
+|----------|--------|-------------|
+|id |String (required)| Topology Id |
+|window |String. Default value :all-time| Window duration for metrics in seconds|
+|sys |String. Values 1 or 0. Default value 0| Controls including sys stats part of the response|
+
+Response fields:
+
+|Field |Value |Description|
+|--- |--- |---
+|window |String. Default value ":all-time" | Window duration for metrics in seconds|
+|windowHint| String | Window param value in "hh mm ss" format. Default value is "All Time"|
+|spouts| Array | Array of all the spout components in the topology|
+|spouts.id| String |Spout id|
+|spouts.emitted| Array | Array of all the output streams this spout emits messages to |
+|spouts.emitted.stream_id| String | Stream id for this stream |
+|spouts.emitted.value| Long | Number of messages emitted in given window|
+|spouts.transferred | Array | Array of all the output streams this spout transfers messages to |
+|spouts.transferred.stream_id| String | Stream id for this stream |
+|spouts.transferred.value| Long |Number of messages transferred in given window|
+|spouts.acked| Array | Array of all the output streams on which this spout receives acks |
+|spouts.acked.stream_id| String | Stream id for this stream |
+|spouts.acked.value| Long |Number of messages acked in given window|
+|spouts.failed| Array | Array of all the output streams on which this spout receives fails |
+|spouts.failed.stream_id| String | Stream id for this stream |
+|spouts.failed.value| Long |Number of messages failed in given window|
+|spouts.complete_ms_avg| Array | Array of average complete latencies, one entry per output stream |
+|spouts.complete_ms_avg.stream_id| String | Stream id for this stream |
+|spouts.complete_ms_avg.value| String (double value returned in String format) | Total latency for processing the message|
+|bolts| Array | Array of all the bolt components in the topology|
+|bolts.id| String |Bolt id|
+|bolts.emitted| Array | Array of all the output streams this bolt emits messages to |
+|bolts.emitted.stream_id| String | Stream id for this stream |
+|bolts.emitted.value| Long | Number of messages emitted in given window|
+|bolts.transferred | Array | Array of all the output streams this bolt transfers messages to |
+|bolts.transferred.stream_id| String | Stream id for this stream |
+|bolts.transferred.value| Long |Number of messages transferred in given window|
+|bolts.acked| Array | Array of all the input streams on which this bolt acks messages |
+|bolts.acked.component_id| String | Component id for this stream |
+|bolts.acked.stream_id| String | Stream id for this stream |
+|bolts.acked.value| Long |Number of messages acked in given window|
+|bolts.failed| Array | Array of all the input streams on which this bolt fails messages |
+|bolts.failed.component_id| String | Component id for this stream |
+|bolts.failed.stream_id| String | Stream id for this stream |
+|bolts.failed.value| Long |Number of messages failed in given window|
+|bolts.process_ms_avg| Array | Array of average process latencies, one entry per input stream |
+|bolts.process_ms_avg.component_id| String | Component id for this stream |
+|bolts.process_ms_avg.stream_id| String | Stream id for this stream |
+|bolts.process_ms_avg.value| String (double value returned in String format) |Average time for the bolt to ack a message after it was received|
+|bolts.executed| Array | Array of all the input streams on which this bolt executes messages |
+|bolts.executed.component_id| String | Component id for this stream |
+|bolts.executed.stream_id| String | Stream id for this stream |
+|bolts.executed.value| Long |Number of messages executed in given window|
+|bolts.executed_ms_avg| Array | Array of average execute latencies, one entry per input stream |
+|bolts.executed_ms_avg.component_id| String | Component id for this stream |
+|bolts.executed_ms_avg.stream_id| String | Stream id for this stream |
+|bolts.executed_ms_avg.value| String (double value returned in String format) | Average time to run the execute method of the bolt|
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/metrics
+2. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/metrics?sys=1
+3. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/metrics?window=600
+```
+
+Sample response:
+
+```json
+{
+    "window": ":all-time",
+    "window-hint": "All time",
+    "spouts": [
+        {
+            "id": "spout",
+            "emitted": [
+                {
+                    "stream_id": "__metrics",
+                    "value": 20
+                },
+                {
+                    "stream_id": "default",
+                    "value": 17350280
+                },
+                {
+                    "stream_id": "__ack_init",
+                    "value": 17328160
+                },
+                {
+                    "stream_id": "__system",
+                    "value": 20
+                }
+            ],
+            "transferred": [
+                {
+                    "stream_id": "__metrics",
+                    "value": 20
+                },
+                {
+                    "stream_id": "default",
+                    "value": 17350280
+                },
+                {
+                    "stream_id": "__ack_init",
+                    "value": 17328160
+                },
+                {
+                    "stream_id": "__system",
+                    "value": 0
+                }
+            ],
+            "acked": [
+                {
+                    "stream_id": "default",
+                    "value": 17339180
+                }
+            ],
+            "failed": [],
+            "complete_ms_avg": [
+                {
+                    "stream_id": "default",
+                    "value": "920.497"
+                }
+            ]
+        }
+    ],
+    "bolts": [
+        {
+            "id": "count",
+            "emitted": [
+                {
+                    "stream_id": "__metrics",
+                    "value": 120
+                },
+                {
+                    "stream_id": "default",
+                    "value": 190748180
+                },
+                {
+                    "stream_id": "__ack_ack",
+                    "value": 190718100
+                },
+                {
+                    "stream_id": "__system",
+                    "value": 20
+                }
+            ],
+            "transferred": [
+                {
+                    "stream_id": "__metrics",
+                    "value": 120
+                },
+                {
+                    "stream_id": "default",
+                    "value": 0
+                },
+                {
+                    "stream_id": "__ack_ack",
+                    "value": 190718100
+                },
+                {
+                    "stream_id": "__system",
+                    "value": 0
+                }
+            ],
+            "acked": [
+                {
+                    "component_id": "split",
+                    "stream_id": "default",
+                    "value": 190733160
+                }
+            ],
+            "failed": [],
+            "process_ms_avg": [
+                {
+                    "component_id": "split",
+                    "stream_id": "default",
+                    "value": "0.004"
+                }
+            ],
+            "executed": [
+                {
+                    "component_id": "split",
+                    "stream_id": "default",
+                    "value": 190733140
+                }
+            ],
+            "executed_ms_avg": [
+                {
+                    "component_id": "split",
+                    "stream_id": "default",
+                    "value": "0.005"
+                }
+            ]
+        },
+        {
+            "id": "split",
+            "emitted": [
+                {
+                    "stream_id": "__metrics",
+                    "value": 60
+                },
+                {
+                    "stream_id": "default",
+                    "value": 190754740
+                },
+                {
+                    "stream_id": "__ack_ack",
+                    "value": 17317580
+                },
+                {
+                    "stream_id": "__system",
+                    "value": 20
+                }
+            ],
+            "transferred": [
+                {
+                    "stream_id": "__metrics",
+                    "value": 60
+                },
+                {
+                    "stream_id": "default",
+                    "value": 190754740
+                },
+                {
+                    "stream_id": "__ack_ack",
+                    "value": 17317580
+                },
+                {
+                    "stream_id": "__system",
+                    "value": 0
+                }
+            ],
+            "acked": [
+                {
+                    "component_id": "spout",
+                    "stream_id": "default",
+                    "value": 17339180
+                }
+            ],
+            "failed": [],
+            "process_ms_avg": [
+                {
+                    "component_id": "spout",
+                    "stream_id": "default",
+                    "value": "0.051"
+                }
+            ],
+            "executed": [
+                {
+                    "component_id": "spout",
+                    "stream_id": "default",
+                    "value": 17339240
+                }
+            ],
+            "executed_ms_avg": [
+                {
+                    "component_id": "spout",
+                    "stream_id": "default",
+                    "value": "0.052"
+                }
+            ]
+        }
+    ]
+}
+```
+
+### /api/v1/topology/\<id\>/component/\<component\> (GET)
+
+Returns detailed metrics and executor information for a topology whose id is substituted for \<id\> and a component whose id is substituted for \<component\>.
+
+|Parameter |Value |Description |
+|----------|--------|-------------|
+|id |String (required)| Topology Id |
+|component |String (required)| Component Id |
+|window |String. Default value :all-time| Window duration for metrics in seconds|
+|sys |String. Values 1 or 0. Default value 0| Controls including sys stats part of the response|
+
+Response fields:
+
+|Field |Value |Description|
+|--- |--- |---
+|user | String | Topology owner|
+|id | String | Component id|
+|encodedId | String | URL encoded component id|
+|name | String | Topology name|
+|executors| Integer |Number of executor tasks in the component|
+|tasks| Integer |Number of instances of the component|
+|requestedMemOnHeap| Double|Requested On-Heap Memory by User (MB)|
+|requestedMemOffHeap| Double|Requested Off-Heap Memory by User (MB)|
+|requestedCpu| Double|Requested CPU by User (%)|
+|schedulerDisplayResource| Boolean | Whether to display scheduler resource information|
+|topologyId| String | Topology id|
+|topologyStatus| String | Topology status|
+|encodedTopologyId| String | URL encoded topology id|
+|window |String. Default value "All Time" | Window duration for metrics in seconds|
+|componentType | String | Component type: SPOUT or BOLT|
+|windowHint| String | Window param value in "hh mm ss" format. Default value is "All Time"|
+|debug| Boolean | Whether debug is enabled for the component|
+|samplingPct| Double| Controls downsampling of events before they are sent to the event log (percentage)|
+|eventLogLink| String| URL viewer link to event log (debug mode)|
+|profilingAndDebuggingCapable| Boolean |True if there is support for Profiling and Debugging Actions|
+|profileActionEnabled| Boolean |True if worker profiling (Java Flight Recorder) is enabled|
+|profilerActive| Array |Array of currently active Profiler Actions|
+|componentErrors| Array of Errors | List of component errors|
+|componentErrors.errorTime| Long | Timestamp when the exception occurred (Prior to 0.11.0, this field was named 'time'.)|
+|componentErrors.errorHost| String | Host name for the error|
+|componentErrors.errorPort| String | Port for the error|
+|componentErrors.error| String |Shows the error that occurred in the component|
+|componentErrors.errorLapsedSecs| Integer | Number of seconds elapsed since the error occurred in the component |
+|componentErrors.errorWorkerLogLink| String | Link to the worker log that reported the exception |
+|spoutSummary| Array | (only for spouts) Array of component stats, one element per window.|
+|spoutSummary.windowPretty| String |Duration passed in HH:MM:SS format|
+|spoutSummary.window| String | Window duration for metrics in seconds|
+|spoutSummary.emitted| Long |Number of messages emitted in given window |
+|spoutSummary.completeLatency| String (double value returned in String format) |Total latency for processing the message|
+|spoutSummary.transferred| Long |Total number of messages transferred in given window|
+|spoutSummary.acked| Long |Number of messages acked|
+|spoutSummary.failed| Long |Number of messages failed|
+|boltStats| Array | (only for bolts) Array of component stats, one element per window.|
+|boltStats.windowPretty| String |Duration passed in HH:MM:SS format|
+|boltStats.window| String| Window duration for metrics in seconds|
+|boltStats.transferred| Long |Total number of messages transferred in given window|
+|boltStats.processLatency| String (double value returned in String format) |Average time for the bolt to ack a message after it was received|
+|boltStats.acked| Long |Number of messages acked|
+|boltStats.failed| Long |Number of messages failed|
+|inputStats| Array | (only for bolts) Array of input stats|
+|inputStats.component| String |Component id|
+|inputStats.encodedComponentId| String |URL encoded component id|
+|inputStats.executeLatency| Long | The average time a tuple spends in the execute method|
+|inputStats.processLatency| Long | The average time it takes to ack a tuple after it is first received|
+|inputStats.executed| Long |The number of incoming tuples processed|
+|inputStats.acked| Long |Number of messages acked|
+|inputStats.failed| Long |Number of messages failed|
+|inputStats.stream| String |The name of the tuple stream given in the topology, or "default" if none was specified|
+|outputStats| Array | Array of output stats|
+|outputStats.transferred| Long |Number of tuples emitted that were sent to one or more bolts|
+|outputStats.emitted| Long |Number of tuples emitted|
+|outputStats.stream| String |The name of the tuple stream given in the topology, or "default" if none was specified|
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/component/spout
+2. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/component/spout?sys=1
+3. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/component/spout?window=600
+```
+
+Sample response:
+
+```json
+{
+    "name": "WordCount3",
+    "id": "spout",
+    "componentType": "spout",
+    "windowHint": "10m 0s",
+    "executors": 5,
+    "componentErrors": [
+        {
+            "errorTime": 1406006074000,
+            "errorHost": "10.11.1.70",
+            "errorPort": 6701,
+            "errorWorkerLogLink": "http://10.11.1.7:8000/log?file=worker-6701.log",
+            "errorLapsedSecs": 16,
+            "error": "java.lang.RuntimeException: java.lang.StringIndexOutOfBoundsException: Some Error\n\tat org.apache.storm.utils.DisruptorQueue.consumeBatchToCursor(DisruptorQueue.java:128)\n\tat org.apache.storm.utils.DisruptorQueue.consumeBatchWhenAvailable(DisruptorQueue.java:99)\n\tat org.apache.storm.disruptor$consume_batch_when_available.invoke(disruptor.clj:80)\n\tat backtype...more.."
+        }
+    ],
+    "topologyId": "WordCount3-1-1402960825",
+    "tasks": 5,
+    "window": "600",
+    "profilerActive": [
+        {
+            "host": "10.11.1.70",
+            "port": "6701",
+            "dumplink": "http:\/\/10.11.1.70:8000\/dumps\/ex-1-1452718803\/10.11.1.70%3A6701",
+            "timestamp": "576328"
+        }
+    ],
+    "profilingAndDebuggingCapable": true,
+    "profileActionEnabled": true,
+    "spoutSummary": [
+        {
+            "windowPretty": "10m 0s",
+            "window": "600",
+            "emitted": 28500,
+            "transferred": 28460,
+            "completeLatency": "0.000",
+            "acked": 0,
+            "failed": 0
+        },
+        {
+            "windowPretty": "3h 0m 0s",
+            "window": "10800",
+            "emitted": 127640,
+            "transferred": 127440,
+            "completeLatency": "0.000",
+            "acked": 0,
+            "failed": 0
+        },
+        {
+            "windowPretty": "1d 0h 0m 0s",
+            "window": "86400",
+            "emitted": 127640,
+            "transferred": 127440,
+            "completeLatency": "0.000",
+            "acked": 0,
+            "failed": 0
+        },
+        {
+            "windowPretty": "All time",
+            "window": ":all-time",
+            "emitted": 127640,
+            "transferred": 127440,
+            "completeLatency": "0.000",
+            "acked": 0,
+            "failed": 0
+        }
+    ],
+    "outputStats": [
+        {
+            "stream": "__metrics",
+            "emitted": 40,
+            "transferred": 0,
+            "completeLatency": "0",
+            "acked": 0,
+            "failed": 0
+        },
+        {
+            "stream": "default",
+            "emitted": 28460,
+            "transferred": 28460,
+            "completeLatency": "0",
+            "acked": 0,
+            "failed": 0
+        }
+    ],
+    "executorStats": [
+        {
+            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6701.log",
+            "emitted": 5720,
+            "port": 6701,
+            "completeLatency": "0.000",
+            "transferred": 5720,
+            "host": "10.11.1.7",
+            "acked": 0,
+            "uptime": "43m 4s",
+            "uptimeSeconds": 2584,
+            "id": "[24-24]",
+            "failed": 0
+        },
+        {
+            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6703.log",
+            "emitted": 5700,
+            "port": 6703,
+            "completeLatency": "0.000",
+            "transferred": 5700,
+            "host": "10.11.1.7",
+            "acked": 0,
+            "uptime": "42m 57s",
+            "uptimeSeconds": 2577,
+            "id": "[25-25]",
+            "failed": 0
+        },
+        {
+            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6702.log",
+            "emitted": 5700,
+            "port": 6702,
+            "completeLatency": "0.000",
+            "transferred": 5680,
+            "host": "10.11.1.7",
+            "acked": 0,
+            "uptime": "42m 57s",
+            "uptimeSeconds": 2577,
+            "id": "[26-26]",
+            "failed": 0
+        },
+        {
+            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6701.log",
+            "emitted": 5700,
+            "port": 6701,
+            "completeLatency": "0.000",
+            "transferred": 5680,
+            "host": "10.11.1.7",
+            "acked": 0,
+            "uptime": "43m 4s",
+            "uptimeSeconds": 2584,
+            "id": "[27-27]",
+            "failed": 0
+        },
+        {
+            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6703.log",
+            "emitted": 5680,
+            "port": 6703,
+            "completeLatency": "0.000",
+            "transferred": 5680,
+            "host": "10.11.1.7",
+            "acked": 0,
+            "uptime": "42m 57s",
+            "uptimeSeconds": 2577,
+            "id": "[28-28]",
+            "failed": 0
+        }
+    ]
+}
+```
+
+## Profiling and Debugging GET Operations
+
+### /api/v1/topology/\<id\>/profiling/start/\<host-port\>/\<timeout\> (GET)
+
+Request to start the profiler on a worker, with a timeout. Returns the status and a link to the profiler artifacts for the worker.
+Substitute appropriate values for \<id\>, \<host-port\> and \<timeout\>.
+
+|Parameter |Value |Description |
+|----------|--------|-------------|
+|id |String (required)| Topology Id |
+|host-port |String (required)| Worker Id |
+|timeout |String (required)| Timeout for the profiler to stop, in minutes |
+
+Response fields:
+
+|Field |Value |Description|
+|----- |----- |-----------|
+|id | String | Worker id|
+|status | String | Response Status |
+|timeout | String | Requested timeout|
+|dumplink | String | Link to logviewer URL for worker profiler documents.|
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/start/10.11.1.7:6701/10
+2. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/start/10.11.1.7:6701/5
+3. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/start/10.11.1.7:6701/20
+```
+
+Sample response:
+
+```json
+{
+    "status": "ok",
+    "id": "10.11.1.7:6701",
+    "timeout": "10",
+    "dumplink": "http:\/\/10.11.1.7:8000\/dumps\/wordcount-1-1446614150\/10.11.1.7%3A6701"
+}
+```
+
+### /api/v1/topology/\<id\>/profiling/dumpprofile/\<host-port\> (GET)
+
+Request to dump the profiler recording on a worker. Returns the status and worker id for the request.
+Substitute appropriate values for \<id\> and \<host-port\>.
+
+|Parameter |Value |Description |
+|----------|--------|-------------|
+|id |String (required)| Topology Id |
+|host-port |String (required)| Worker Id |
+
+Response fields:
+
+|Field |Value |Description|
+|----- |----- |-----------|
+|id | String | Worker id|
+|status | String | Response Status |
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/dumpprofile/10.11.1.7:6701
+```
+
+Sample response:
+
+```json
+{
+    "status": "ok",
+    "id": "10.11.1.7:6701"
+}
+```
+
+### /api/v1/topology/\<id\>/profiling/stop/\<host-port\> (GET)
+
+Request to stop the profiler on a worker. Returns the status and worker id for the request.
+Substitute appropriate values for \<id\> and \<host-port\>.
+
+|Parameter |Value |Description |
+|----------|--------|-------------|
+|id |String (required)| Topology Id |
+|host-port |String (required)| Worker Id |
+
+Response fields:
+
+|Field |Value |Description|
+|----- |----- |-----------|
+|id | String | Worker id|
+|status | String | Response Status |
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/stop/10.11.1.7:6701
+```
+
+Sample response:
+
+```json
+{
+    "status": "ok",
+    "id": "10.11.1.7:6701"
+}
+```
+
+### /api/v1/topology/\<id\>/profiling/dumpjstack/\<host-port\> (GET)
+
+Request to dump jstack output on a worker. Returns the status and worker id for the request.
+Substitute appropriate values for \<id\> and \<host-port\>.
+
+|Parameter |Value |Description |
+|----------|--------|-------------|
+|id |String (required)| Topology Id |
+|host-port |String (required)| Worker Id |
+
+Response fields:
+
+|Field |Value |Description|
+|----- |----- |-----------|
+|id | String | Worker id|
+|status | String | Response Status |
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/dumpjstack/10.11.1.7:6701
+```
+
+Sample response:
+
+```json
+{
+    "status": "ok",
+    "id": "10.11.1.7:6701"
+}
+```
+
+### /api/v1/topology/\<id\>/profiling/dumpheap/\<host-port\> (GET)
+
+Request to dump the heap (jmap) on a worker. Returns the status and worker id for the request.
+Substitute appropriate values for \<id\> and \<host-port\>.
+
+|Parameter |Value |Description |
+|----------|--------|-------------|
+|id |String (required)| Topology Id |
+|host-port |String (required)| Worker Id |
+
+Response fields:
+
+|Field |Value |Description|
+|----- |----- |-----------|
+|id | String | Worker id|
+|status | String | Response Status |
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/dumpheap/10.11.1.7:6701
+```
+
+Sample response:
+
+```json
+{
+    "status": "ok",
+    "id": "10.11.1.7:6701"
+}
+```
+
+### /api/v1/topology/\<id\>/profiling/restartworker/\<host-port\> (GET)
+
+Request to restart the worker. Returns the status and worker id for the request.
+Substitute appropriate values for \<id\> and \<host-port\>.
+
+|Parameter |Value |Description |
+|----------|--------|-------------|
+|id |String (required)| Topology Id |
+|host-port |String (required)| Worker Id |
+
+Response fields:
+
+|Field |Value |Description|
+|----- |----- |-----------|
+|id | String | Worker id|
+|status | String | Response Status |
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/restartworker/10.11.1.7:6701
+```
+
+Sample response:
+
+```json
+{
+    "status": "ok",
+    "id": "10.11.1.7:6701"
+}
+```
+
+## POST Operations
+
+### /api/v1/topology/\<id\>/activate (POST)
+
+Activates a topology. Substitute the topology id for \<id\>.
+
+|Parameter |Value |Description |
+|----------|--------|-------------|
+|id |String (required)| Topology Id |
+
+Sample Response:
+
+```json
+{"topologyOperation":"activate","topologyId":"wordcount-1-1420308665","status":"success"}
+```
+
+
+### /api/v1/topology/\<id\>/deactivate (POST)
+
+Deactivates a topology. Substitute the topology id for \<id\>.
+
+|Parameter |Value |Description |
+|----------|--------|-------------|
+|id |String (required)| Topology Id |
+
+Sample Response:
+
+```json
+{"topologyOperation":"deactivate","topologyId":"wordcount-1-1420308665","status":"success"}
+```
+
+
+### /api/v1/topology/\<id\>/rebalance/\<wait-time\> (POST)
+
+Rebalances a topology.
+Substitute appropriate values for \<id\> and \<wait-time\>.
+
+|Parameter |Value |Description |
+|----------|--------|-------------|
+|id |String (required)| Topology Id |
+|wait-time |String (required)| Wait time before the rebalance happens |
+|rebalanceOptions| Json (optional) | Topology rebalance options |
+
+
+Sample rebalanceOptions json:
+
+```json
+{"rebalanceOptions" : {"numWorkers" : 2, "executors" : {"spout" : 4, "count" : 10}}, "callback" : "foo"}
+```
+
+Examples:
+
+```no-highlight
+curl -i -b ~/cookiejar.txt -c ~/cookiejar.txt -X POST
+-H "Content-Type: application/json"
+-d '{"rebalanceOptions": {"numWorkers": 2, "executors": { "spout" : "5", "split": 7, "count": 5 }}, "callback":"foo"}'
+http://localhost:8080/api/v1/topology/wordcount-1-1420308665/rebalance/0
+```
+
+Sample Response:
+
+```json
+{"topologyOperation":"rebalance","topologyId":"wordcount-1-1420308665","status":"success"}
+```
+
+
+
+### /api/v1/topology/\<id\>/kill/\<wait-time\> (POST)
+
+Kills a topology.
+Substitute appropriate values for \<id\> and \<wait-time\>.
+
+|Parameter |Value |Description |
+|----------|--------|-------------|
+|id |String (required)| Topology Id |
+|wait-time |String (required)| Wait time before the topology is killed |
+
+Caution: Small wait times (0-5 seconds) may increase the probability of triggering the bug reported in
+[STORM-112](https://issues.apache.org/jira/browse/STORM-112), which may result in broken Supervisor
+daemons.
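+
+For example, a kill request can be issued over HTTP in the same spirit as the rebalance curl example above. This is a minimal sketch; the host, topology id, and 30 second wait time are placeholders, and any authentication your UI requires is omitted:
+
+```java
+import java.net.URI;
+import java.net.http.HttpClient;
+import java.net.http.HttpRequest;
+import java.net.http.HttpResponse;
+
+public class KillTopology {
+    public static void main(String[] args) throws Exception {
+        // Assumed UI address and topology id; the wait time is the last path segment.
+        URI uri = URI.create("http://localhost:8080/api/v1/topology/wordcount-1-1420308665/kill/30");
+        HttpRequest request = HttpRequest.newBuilder(uri)
+                .POST(HttpRequest.BodyPublishers.noBody())
+                .build();
+        HttpResponse<String> response = HttpClient.newHttpClient()
+                .send(request, HttpResponse.BodyHandlers.ofString());
+        System.out.println(response.body());
+    }
+}
+```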
+
+Sample Response:
+
+```json
+{"topologyOperation":"kill","topologyId":"wordcount-1-1420308665","status":"success"}
+```
+
+## API errors
+
+The API returns a 500 HTTP status code in case of any errors.
+
+# DRPC REST API
+
+If DRPC is configured with either an http or https port it will expose a REST endpoint. (See [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html) for how to do that.)
+
+In all of these commands `:func` is the DRPC function and `:args` is the arguments to it. The only difference between the endpoints is in how those arguments are supplied. In all cases the result is returned in the response's body.
+
+In all cases DRPC does not have state, so if your request times out or results in an error, please retry the request, preferably with an exponential backoff to avoid mounting a DDoS on the DRPC servers.
+
+### /drpc/:func (POST)
+
+In this case the `:args` to the DRPC request are in the body of the POST.
+
+### /drpc/:func/:args (GET)
+
+In this case the `:args` are supplied as a part of the URL itself. Many tools place limits on URL lengths, so if the arguments are above a hundred characters it is recommended
+to use the POST option instead.
+
+### /drpc/:func (GET)
+
+In some rare cases `:args` may not be needed by the DRPC command. If no `:args` section is given in the DRPC request, an empty string `""` will be used for the arguments.
+
+
diff --git a/docs/Serialization-(prior-to-0.6.0).md b/docs/Serialization-(prior-to-0.6.0).md
new file mode 100644
index 00000000000..e4a0d4fd0d1
--- /dev/null
+++ b/docs/Serialization-(prior-to-0.6.0).md
@@ -0,0 +1,50 @@
+---
+layout: documentation
+---
+Tuples can be comprised of objects of any type. Since Storm is a distributed system, it needs to know how to serialize and deserialize objects when they're passed between tasks. By default Storm can serialize ints, shorts, longs, floats, doubles, bools, bytes, strings, and byte arrays, but if you want to use another type in your tuples, you'll need to implement a custom serializer.
+
+### Dynamic typing
+
+There are no type declarations for fields in a Tuple. You put objects in fields and Storm figures out the serialization dynamically. Before we get to the interface for serialization, let's spend a moment understanding why Storm's tuples are dynamically typed.
+
+Adding static typing to tuple fields would add a large amount of complexity to Storm's API. Hadoop, for example, statically types its keys and values but requires a huge amount of annotations on the part of the user. Hadoop's API is a burden to use and the "type safety" isn't worth it. Dynamic typing is simply easier to use.
+
+Further than that, it's not possible to statically type Storm's tuples in any reasonable way. Suppose a Bolt subscribes to multiple streams. The tuples from all those streams may have different types across the fields. When a Bolt receives a `Tuple` in `execute`, that tuple could have come from any stream and so could have any combination of types. There might be some reflection magic you can do to declare a different method for every tuple stream a bolt subscribes to, but Storm opts for the simpler, straightforward approach of dynamic typing.
+
+Finally, another reason for using dynamic typing is so Storm can be used in a straightforward manner from dynamically typed languages like Clojure and JRuby.
+
+### Custom serialization
+
+Let's dive into Storm's API for defining custom serializations. There are two steps you need to take as a user to create a custom serialization: implement the serializer, and register the serializer with Storm.
+
+#### Creating a serializer
+
+Custom serializers implement the [ISerialization](javadocs/backtype/storm/serialization/ISerialization.html) interface. Implementations specify how to serialize and deserialize types into a binary format.
+
+The interface looks like this:
+
+```java
+public interface ISerialization<T> {
+    public boolean accept(Class c);
+    public void serialize(T object, DataOutputStream stream) throws IOException;
+    public T deserialize(DataInputStream stream) throws IOException;
+}
+```
+
+Storm uses the `accept` method to determine if a type can be serialized by this serializer. Remember, Storm's tuples are dynamically typed, so Storm determines what serializer to use at runtime.
+
+`serialize` writes the object out to the output stream in binary format. The field must be written in a way such that it can be deserialized later. For example, if you're writing out a list of objects, you'll need to write out the size of the list first so that you know how many elements to deserialize.
+
+`deserialize` reads the serialized object off of the stream and returns it.
+
+You can see example serialization implementations in the source for [SerializationFactory](https://github.com/apache/incubator-storm/blob/0.5.4/src/jvm/backtype/storm/serialization/SerializationFactory.java).
+
+#### Registering a serializer
+
+Once you create a serializer, you need to tell Storm it exists. This is done through the Storm configuration (See [Concepts](Concepts.html) for information about how configuration works in Storm). You can register serializations either through the config given when submitting a topology or in the storm.yaml files across your cluster.
+
+Serializer registrations are done through the Config.TOPOLOGY_SERIALIZATIONS config, which is simply a list of serialization class names.
+
+Storm provides helpers for registering serializers in a topology config. The [Config](javadocs/backtype/storm/Config.html) class has a method called `addSerialization` that takes in a serializer class to add to the config.
+
+There's an advanced config called Config.TOPOLOGY_SKIP_MISSING_SERIALIZATIONS. If you set this to true, Storm will ignore any serializations that are registered but do not have their code available on the classpath. Otherwise, Storm will throw errors when it can't find a serialization. This is useful if you run many topologies on a cluster that each have different serializations, but you want to declare all the serializations across all topologies in the `storm.yaml` files.
diff --git a/docs/Serialization.md b/docs/Serialization.md
new file mode 100644
index 00000000000..47f0606da17
--- /dev/null
+++ b/docs/Serialization.md
@@ -0,0 +1,68 @@
+---
+title: Serialization
+layout: documentation
+documentation: true
+---
+This page is about how the serialization system in Storm works for versions 0.6.0 and onwards. Storm used a different serialization system prior to 0.6.0 which is documented on [Serialization (prior to 0.6.0)](Serialization-\(prior-to-0.6.0\).html).
+
+Tuples can be comprised of objects of any type. Since Storm is a distributed system, it needs to know how to serialize and deserialize objects when they're passed between tasks.
+
+Storm uses [Kryo](https://github.com/EsotericSoftware/kryo) for serialization. Kryo is a flexible and fast serialization library that produces small serializations.
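+
+To give a feel for what a Kryo serializer looks like, here is a minimal sketch for a hypothetical `Point` class. The class and its serializer are purely illustrative and not part of Storm or Kryo; the `write`/`read` signatures follow the Kryo versions Storm has historically shipped with:
+
+```java
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+
+// A hypothetical user-defined type carried in tuple fields.
+class Point {
+    final double x, y;
+    Point(double x, double y) { this.x = x; this.y = y; }
+}
+
+public class PointSerializer extends Serializer<Point> {
+    @Override
+    public void write(Kryo kryo, Output output, Point p) {
+        // Write the two coordinates in a fixed order...
+        output.writeDouble(p.x);
+        output.writeDouble(p.y);
+    }
+
+    @Override
+    public Point read(Kryo kryo, Input input, Class<Point> type) {
+        // ...and read them back in the same order.
+        return new Point(input.readDouble(), input.readDouble());
+    }
+}
+```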
+
+By default, Storm can serialize primitive types, strings, byte arrays, ArrayList, HashMap, and HashSet. If you want to use another type in your tuples, you'll need to register a custom serializer.
+
+### Dynamic typing
+
+There are no type declarations for fields in a Tuple. You put objects in fields and Storm figures out the serialization dynamically. Before we get to the interface for serialization, let's spend a moment understanding why Storm's tuples are dynamically typed.
+
+Adding static typing to tuple fields would add a large amount of complexity to Storm's API. Hadoop, for example, statically types its keys and values but requires a huge amount of annotations on the part of the user. Hadoop's API is a burden to use and the "type safety" isn't worth it. Dynamic typing is simply easier to use.
+
+Further than that, it's not possible to statically type Storm's tuples in any reasonable way. Suppose a Bolt subscribes to multiple streams. The tuples from all those streams may have different types across the fields. When a Bolt receives a `Tuple` in `execute`, that tuple could have come from any stream and so could have any combination of types. There might be some reflection magic you can do to declare a different method for every tuple stream a bolt subscribes to, but Storm opts for the simpler, straightforward approach of dynamic typing.
+
+Finally, another reason for using dynamic typing is so Storm can be used in a straightforward manner from dynamically typed languages like Clojure and JRuby.
+
+### Custom serialization
+
+As mentioned, Storm uses Kryo for serialization. To implement custom serializers, you need to register new serializers with Kryo. It's highly recommended that you read over [Kryo's home page](https://github.com/EsotericSoftware/kryo) to understand how it handles custom serialization.
+
+Adding custom serializers is done through the "topology.kryo.register" property in your topology config or through a ServiceLoader described later. The config takes a list of registrations, where each registration can take one of two forms:
+
+1. The name of a class to register. In this case, Storm will use Kryo's `FieldsSerializer` to serialize the class. This may or may not be optimal for the class -- see the Kryo docs for more details.
+2. A map from the name of a class to register to an implementation of [com.esotericsoftware.kryo.Serializer](https://github.com/EsotericSoftware/kryo/blob/master/src/com/esotericsoftware/kryo/Serializer.java).
+
+Let's look at an example.
+
+```
+topology.kryo.register:
+  - com.mycompany.CustomType1
+  - com.mycompany.CustomType2: com.mycompany.serializer.CustomType2Serializer
+  - com.mycompany.CustomType3
+```
+
+`com.mycompany.CustomType1` and `com.mycompany.CustomType3` will use the `FieldsSerializer`, whereas `com.mycompany.CustomType2` will use `com.mycompany.serializer.CustomType2Serializer` for serialization.
+
+Storm provides helpers for registering serializers in a topology config. The [Config](javadocs/org/apache/storm/Config.html) class has a method called `registerSerialization` that takes in a registration to add to the config.
+
+There's an advanced config called `Config.TOPOLOGY_SKIP_MISSING_KRYO_REGISTRATIONS`. If you set this to true, Storm will ignore any serializations that are registered but do not have their code available on the classpath. Otherwise, Storm will throw errors when it can't find a serialization. This is useful if you run many topologies on a cluster that each have different serializations, but you want to declare all the serializations across all topologies in the `storm.yaml` files.
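+
+As a sketch of the `registerSerialization` helper in action, using the hypothetical `com.mycompany` classes from the example above (the topology building and submission are elided):
+
+```java
+import org.apache.storm.Config;
+
+public class SerializationConfigExample {
+    public static Config buildConfig() {
+        Config conf = new Config();
+        // Registered with Kryo's default FieldsSerializer.
+        conf.registerSerialization(com.mycompany.CustomType1.class);
+        // Registered with an explicit custom serializer.
+        conf.registerSerialization(com.mycompany.CustomType2.class,
+                com.mycompany.serializer.CustomType2Serializer.class);
+        return conf;
+    }
+}
+```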
+
+#### SerializationRegister Service Loader
+
+If you want to provide language bindings for Storm, have a library that you want to interact cleanly with Storm, or have some other reason to provide serialization bindings without forcing users to update their configs, you can use the org.apache.storm.serialization.SerializationRegister service loader.
+
+You may use this like any other service loader, and Storm will register the bindings without forcing users to update their configs. The storm-clojure package uses this to provide transparent support for Clojure types.
+
+### Java serialization
+
+When `Config.TOPOLOGY_FALL_BACK_ON_JAVA_SERIALIZATION` is set to true, if Storm encounters a type for which it doesn't have a serialization registered, it will use Java serialization if possible. If the object can't be serialized with Java serialization, then Storm will throw an error.
+
+Beware that Java serialization is extremely expensive, both in terms of CPU cost as well as the size of the serialized object. It is highly recommended that you register custom serializers when you put the topology in production. The Java serialization behavior is there so that it's easy to prototype new topologies.
+
+You can turn on/off the behavior to fall back on Java serialization by setting the `Config.TOPOLOGY_FALL_BACK_ON_JAVA_SERIALIZATION` config to true/false. The default value is false for security reasons.
+
+### Component-specific serialization registrations
+
+Storm 0.7.0 lets you set component-specific configurations (read more about this at [Configuration](Configuration.html)). Of course, if one component defines a serialization, that serialization will need to be available to other bolts -- otherwise they won't be able to receive messages from that component!
+
+When a topology is submitted, a single set of serializations is chosen to be used by all components in the topology for sending messages. This is done by merging the component-specific serializer registrations with the regular set of serialization registrations. If two components define serializers for the same class, one of the serializers is chosen arbitrarily.
+
+To force a serializer for a particular class if there's a conflict between two component-specific registrations, just define the serializer you want to use in the topology-specific configuration. The topology-specific configuration has precedence over component-specific configurations for serialization registrations.
diff --git a/docs/Serializers.md b/docs/Serializers.md
new file mode 100644
index 00000000000..071c8851177
--- /dev/null
+++ b/docs/Serializers.md
@@ -0,0 +1,4 @@
+---
+layout: documentation
+---
+* [storm-json](https://github.com/rapportive-oss/storm-json): Simple JSON serializer for Storm
diff --git a/docs/Setting-up-a-Storm-cluster.md b/docs/Setting-up-a-Storm-cluster.md
new file mode 100644
index 00000000000..558defa5ed1
--- /dev/null
+++ b/docs/Setting-up-a-Storm-cluster.md
@@ -0,0 +1,135 @@
+---
+title: Setting up a Storm Cluster
+layout: documentation
+documentation: true
+---
+This page outlines the steps for getting a Storm cluster up and running.
+
+If you run into difficulties with your Storm cluster, first check the [Troubleshooting](Troubleshooting.html) page for a solution. Otherwise, email the mailing list.
+
+Here's a summary of the steps for setting up a Storm cluster:
+
+1. Set up a Zookeeper cluster
+2. Install dependencies on Nimbus and worker machines
+3. Download and extract a Storm release to Nimbus and worker machines
+4. Fill in mandatory configurations into storm.yaml
+5. Launch daemons under supervision using the "storm" script and a supervisor of your choice
+6. Setup DRPC servers (Optional)
+
+### Set up a Zookeeper cluster
+
+Storm uses Zookeeper for coordinating the cluster. Zookeeper **is not** used for message passing, so the load Storm places on Zookeeper is quite low. Single node Zookeeper clusters should be sufficient for most cases, but if you want failover or are deploying large Storm clusters you may want larger Zookeeper clusters. Instructions for deploying Zookeeper are [here](http://zookeeper.apache.org/doc/r3.3.3/zookeeperAdmin.html).
+
+A few notes about Zookeeper deployment:
+
+1. It's critical that you run Zookeeper under supervision, since Zookeeper is fail-fast and will exit the process if it encounters any error case. See [here](http://zookeeper.apache.org/doc/r3.3.3/zookeeperAdmin.html#sc_supervision) for more details.
+2. It's critical that you set up a cron to compact Zookeeper's data and transaction logs. The Zookeeper daemon does not do this on its own, and if you don't set up a cron, Zookeeper will quickly run out of disk space. See [here](http://zookeeper.apache.org/doc/r3.3.3/zookeeperAdmin.html#sc_maintenance) for more details.
+
+### Install dependencies on Nimbus and worker machines
+
+Next you need to install Storm's dependencies on Nimbus and the worker machines. These are:
+
+1. Java 11+ (Apache Storm 2.x is tested through GitHub actions against Java 11, Java 17 and Java 21)
+2. Python 3.x
+
+These are the versions of the dependencies that have been tested with Storm. Storm may or may not work with different versions of Java and/or Python.
+
+
+### Download and extract a Storm release to Nimbus and worker machines
+
+Next, download a Storm release and extract the zip file somewhere on Nimbus and each of the worker machines. The Storm releases can be downloaded [from here](../../downloads.html).
+
+### Fill in mandatory configurations into storm.yaml
+
+The Storm release contains a file at `conf/storm.yaml` that configures the Storm daemons. You can see the default configuration values [here]({{page.git-blob-base}}/conf/defaults.yaml). storm.yaml overrides anything in defaults.yaml. There are a few configurations that are mandatory to get a working cluster:
+
+1) **storm.zookeeper.servers**: This is a list of the hosts in the Zookeeper cluster for your Storm cluster. It should look something like:
+
+```yaml
+storm.zookeeper.servers:
+  - "111.222.333.444"
+  - "555.666.777.888"
+```
+
+If the port that your Zookeeper cluster uses is different from the default, you should set **storm.zookeeper.port** as well.
+
+2) **storm.local.dir**: The Nimbus and Supervisor daemons require a directory on the local disk to store small amounts of state (like jars, confs, and things like that).
+ You should create that directory on each machine, give it proper permissions, and then fill in the directory location using this config. For example:
+
+```yaml
+storm.local.dir: "/mnt/storm"
+```
+If you run Storm on Windows, it could be:
+
+```yaml
+storm.local.dir: "C:\\storm-local"
+```
+If you use a relative path, it will be relative to where you installed Storm (STORM_HOME).
+You can also leave it empty, in which case it defaults to `$STORM_HOME/storm-local`.
+
+3) **nimbus.seeds**: The worker nodes need to know which machines are candidates for master in order to download topology jars and confs. For example:
+
+```yaml
+nimbus.seeds: ["111.222.333.44"]
+```
+You're encouraged to fill in this value with the list of the **machines' FQDNs**. If you want to set up Nimbus H/A, you have to list the FQDNs of all the machines that run nimbus. You may want to leave it at the default value when you just want to set up a 'pseudo-distributed' cluster, but you're still encouraged to fill in FQDNs.
+
+4) **supervisor.slots.ports**: For each worker machine, you configure how many workers run on that machine with this config. Each worker uses a single port for receiving messages, and this setting defines which ports are open for use. If you define five ports here, then Storm will allocate up to five workers to run on this machine. If you define three ports, Storm will only run up to three. By default, this setting is configured to run 4 workers on the ports 6700, 6701, 6702, and 6703. For example:
+
+```yaml
+supervisor.slots.ports:
+  - 6700
+  - 6701
+  - 6702
+  - 6703
+```
+
+5) **drpc.servers**: If you want to set up DRPC servers, they need to be specified so that the workers can find them. This should be a list of the DRPC servers. For example:
+
+```yaml
+drpc.servers: ["111.222.333.44"]
+```
+
+### Monitoring Health of Supervisors
+
+Storm provides a mechanism by which administrators can configure the supervisor to periodically run administrator-supplied scripts to determine if a node is healthy or not. Administrators can have the supervisor determine if the node is in a healthy state by performing any checks of their choice in scripts located in storm.health.check.dir. If a script detects the node to be in an unhealthy state, it must return a non-zero exit code. In pre-Storm 2.x releases, a bug caused a script exit value of 0 to be considered a failure. This has now been fixed. The supervisor will periodically run the scripts in the health check dir and check the output. If a script's output contains the string ERROR, the supervisor will shut down any workers and exit.
+
+If the supervisor is running under supervision, "/bin/storm node-health-check" can be called to determine if the supervisor should be launched or if the node is unhealthy.
+
+The health check directory location can be configured with:
+
+```yaml
+storm.health.check.dir: "healthchecks"
+```
+The scripts must have execute permissions.
+The time to allow any given healthcheck script to run before it is marked failed due to timeout can be configured with:
+
+```yaml
+storm.health.check.timeout.ms: 5000
+```
+
+### Configure external libraries and environment variables (optional)
+
+If you need support from external libraries or custom plugins, you can place such jars into the extlib/ and extlib-daemon/ directories. Note that the extlib-daemon/ directory stores jars used only by daemons (Nimbus, Supervisor, DRPC, UI, Logviewer), e.g., HDFS and customized scheduling libraries. Accordingly, the two environment variables STORM_EXT_CLASSPATH and STORM_EXT_CLASSPATH_DAEMON can be set by users to include the external classpath and the daemon-only external classpath. See [Classpath handling](Classpath-handling.html) for more details on using external libraries.
+
+
+### Launch daemons under supervision using "storm" script and a supervisor of your choice
+
+The last step is to launch all the Storm daemons. It is critical that you run each of these daemons under supervision. Storm is a __fail-fast__ system, which means the processes will halt whenever an unexpected error is encountered. Storm is designed so that it can safely halt at any point and recover correctly when the process is restarted. This is why Storm keeps no state in-process -- if Nimbus or the Supervisors restart, the running topologies are unaffected. Here's how to run the Storm daemons:
+
+1. **Nimbus**: Run the command `bin/storm nimbus` under supervision on the master machine.
+2. **Supervisor**: Run the command `bin/storm supervisor` under supervision on each worker machine. The supervisor daemon is responsible for starting and stopping worker processes on that machine.
+3. **UI**: Run the Storm UI (a site you can access from the browser that gives diagnostics on the cluster and topologies) by running the command `bin/storm ui` under supervision. The UI can be accessed by navigating your web browser to http://{ui host}:8080.
+
+As you can see, running the daemons is very straightforward. The daemons will log to the logs/ directory under wherever you extracted the Storm release.
+
+### Setup DRPC servers (Optional)
+
+Just like with Nimbus or the supervisors, you will need to launch the DRPC server. To do this, run the command `bin/storm drpc` on each of the machines that you configured as a part of the `drpc.servers` config.
+
+#### DRPC Http Setup
+
+DRPC optionally offers a REST API as well. To enable this, set the config `drpc.http.port` to the port you want to run on before launching the DRPC server. See the [REST documentation](STORM-UI-REST-API.html) for more information on how to use it.
+
+It also supports SSL by setting `drpc.https.port` along with the keystore and optional truststore, similar to how you would configure the UI.
diff --git a/docs/Setting-up-a-Storm-project-in-Eclipse.md b/docs/Setting-up-a-Storm-project-in-Eclipse.md
new file mode 100644
index 00000000000..5137cd9e32a
--- /dev/null
+++ b/docs/Setting-up-a-Storm-project-in-Eclipse.md
@@ -0,0 +1 @@
+- fill me in
\ No newline at end of file
diff --git a/docs/Setting-up-development-environment.md b/docs/Setting-up-development-environment.md
new file mode 100644
index 00000000000..72e34727ed0
--- /dev/null
+++ b/docs/Setting-up-development-environment.md
@@ -0,0 +1,33 @@
+---
+title: Setting Up a Development Environment
+layout: documentation
+documentation: true
+---
+This page outlines what you need to do to get a Storm development environment set up. In summary, the steps are:
+
+1. Download a [Storm release](../../downloads.html), unpack it, and put the unpacked `bin/` directory on your PATH
+2. To be able to start and stop topologies on a remote cluster, put the cluster information in `~/.storm/storm.yaml`
+
+More detail on each of these steps is below.
+
+### What is a development environment?
+
+Storm has two modes of operation: local mode and remote mode. In local mode, you can develop and test topologies completely in process on your local machine. In remote mode, you submit topologies for execution on a cluster of machines.
+
+A Storm development environment has everything installed so that you can develop and test Storm topologies in local mode, package topologies for execution on a remote cluster, and submit/kill topologies on a remote cluster.
Your machine communicates with Nimbus to submit code (packaged as a jar) and topologies for execution on the cluster, and Nimbus will take care of distributing that code around the cluster and assigning workers to run your topology. Your machine uses a command line client called `storm` to communicate with Nimbus. The `storm` client is only used for remote mode; it is not used for developing and testing topologies in local mode.
+
+### Installing a Storm release locally
+
+If you want to be able to submit topologies to a remote cluster from your machine, you should install a Storm release locally. Installing a Storm release will give you the `storm` client that you can use to interact with remote clusters. To install Storm locally, download a release [from here](https://github.com/apache/storm/releases) and unzip it somewhere on your computer. Then add the unpacked `bin/` directory onto your `PATH` and make sure the `bin/storm` script is executable.
+
+Installing a Storm release locally is only for interacting with remote clusters. For developing and testing topologies in local mode, it is recommended that you use Maven to include Storm as a dev dependency for your project. You can read more about using Maven for this purpose on [Maven](Maven.html).
+
+### Starting and stopping topologies on a remote cluster
+
+The previous step installed the `storm` client on your machine, which is used to communicate with remote Storm clusters. Now all you have to do is tell the client which Storm cluster to talk to by putting the host address of the master in the `~/.storm/storm.yaml` file. It should look something like this:
+
+```
+nimbus.seeds: ["123.45.678.890"]
+```
diff --git a/docs/Spout-implementations.md b/docs/Spout-implementations.md
new file mode 100644
index 00000000000..f52e662dc04
--- /dev/null
+++ b/docs/Spout-implementations.md
@@ -0,0 +1,10 @@
+---
+title: Spout Implementations
+layout: documentation
+documentation: true
+---
+* [storm-kestrel](https://github.com/nathanmarz/storm-kestrel): Adapter to use Kestrel as a spout
+* [storm-amqp-spout](https://github.com/rapportive-oss/storm-amqp-spout): Adapter to use an AMQP source as a spout
+* [storm-jms](https://github.com/ptgoetz/storm-jms): Adapter to use a JMS source as a spout
+* [storm-redis-pubsub](https://github.com/sorenmacbeth/storm-redis-pubsub): A spout that subscribes to a Redis pubsub stream
+* [storm-beanstalkd-spout](https://github.com/haitaoyao/storm-beanstalkd-spout): A spout that subscribes to a beanstalkd queue
diff --git a/docs/State-checkpointing.md b/docs/State-checkpointing.md
new file mode 100644
index 00000000000..687ea818f83
--- /dev/null
+++ b/docs/State-checkpointing.md
@@ -0,0 +1,289 @@
+---
+title: Storm State Management
+layout: documentation
+documentation: true
+---
+# State support in core storm
+Storm core has abstractions for bolts to save and retrieve the state of their operations. There is a default in-memory
+based state implementation and also a Redis backed implementation that provides state persistence.
+
+## State management
+Bolts that require their state to be managed and persisted by the framework should implement the `IStatefulBolt` interface or
+extend `BaseStatefulBolt` and implement the `void initState(T state)` method. The `initState` method is invoked by the framework
+during the bolt initialization with the previously saved state of the bolt. This is invoked after prepare but before the bolt starts
+processing any tuples.
+
+Currently the only kind of `State` implementation that is supported is `KeyValueState`, which provides key-value mapping.
+
+For example, a word count bolt could use the key-value state abstraction for the word counts as follows.
+
+1. Extend the BaseStatefulBolt and type parameterize it with a KeyValueState that stores the mapping of word to count.
+2. The bolt gets initialized with its previously saved state in the init method. This will contain the word counts
+last committed by the framework during the previous run.
+3. In the execute method, update the word count.
+
+```java
+ public class WordCountBolt extends BaseStatefulBolt<KeyValueState<String, Integer>> {
+     private KeyValueState<String, Integer> wordCounts;
+     private OutputCollector collector;
+     ...
+     @Override
+     public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
+         this.collector = collector;
+     }
+     @Override
+     public void initState(KeyValueState<String, Integer> state) {
+         wordCounts = state;
+     }
+     @Override
+     public void execute(Tuple tuple) {
+         String word = tuple.getString(0);
+         Integer count = wordCounts.get(word, 0);
+         count++;
+         wordCounts.put(word, count);
+         collector.emit(tuple, new Values(word, count));
+         collector.ack(tuple);
+     }
+     ...
+ }
+```
+
+4. The framework periodically checkpoints the state of the bolt (default every second). The frequency
+can be changed by setting the storm config `topology.state.checkpoint.interval.ms`.
+5. For state persistence, use a state provider that supports persistence by setting the `topology.state.provider` in the
+storm config. E.g. for using a Redis based key-value state implementation, set `topology.state.provider: org.apache.storm.redis.state.RedisKeyValueStateProvider`
+in storm.yaml. The provider implementation jar should be in the class path, which in this case means adding `storm-redis`
+to the dependencies of your topology.
+6. The state provider properties can be overridden by setting `topology.state.provider.config`. For Redis state this is a
+json config with the following properties.
+
+```
+{
+  "keyClass": "Optional fully qualified class name of the Key type.",
+  "valueClass": "Optional fully qualified class name of the Value type.",
+  "keySerializerClass": "Optional Key serializer implementation class.",
+  "valueSerializerClass": "Optional Value Serializer implementation class.",
+  "jedisPoolConfig": {
+    "host": "localhost",
+    "port": 6379,
+    "timeout": 2000,
+    "database": 0,
+    "password": "xyz"
+  }
+}
+```
+
+For Redis Cluster state this is a json config with the following properties.
+
+```
+{
+  "keyClass": "Optional fully qualified class name of the Key type.",
+  "valueClass": "Optional fully qualified class name of the Value type.",
+  "keySerializerClass": "Optional Key serializer implementation class.",
+  "valueSerializerClass": "Optional Value Serializer implementation class.",
+  "jedisClusterConfig": {
+    "nodes": ["localhost:7379", "localhost:7380", "localhost:7381"],
+    "timeout": 2000,
+    "maxRedirections": 5
+  }
+}
+```
+
+NOTE: If you used Redis state with Storm version 1.1.0 or earlier, you would also need to migrate your state, since the representation of state has changed
+from a Base64-encoded string to binary to reduce the significant overhead. Storm provides a migration tool to help, which is located in the `storm-redis-examples` module.
+
+Please download the source from the download page or clone the project, and run the commands below:
+
+```
+mvn clean install -DskipTests
+cd examples/storm-redis-examples
+<storm-installation-dir>/bin/storm jar target/storm-redis-examples-*.jar org.apache.storm.redis.tools.Base64ToBinaryStateMigrationUtil [options]
+```
+
+Supported options are listed here:
+
+```
+ -d,--dbnum <arg>       Redis DB number (default: 0)
+ -h,--host <arg>        Redis hostname (default: localhost)
+ -n,--namespace <arg>   REQUIRED the list of namespaces to migrate.
+ -p,--port <arg>        Redis port (default: 6379)
+    --password <arg>    Redis password (default: no password)
+```
+
+You can provide multiple `namespace` options to migrate multiple namespaces at once.
+(e.g.: `--namespace total-7 --namespace partialsum-3`)
+Other options are not mandatory.
+Please note that you also need to migrate the keys starting with "$checkpointspout-" since that is an internal namespace used for state.
+
+## Checkpoint mechanism
+Checkpointing is triggered by an internal checkpoint spout at the specified `topology.state.checkpoint.interval.ms`. If there is
+at least one `IStatefulBolt` in the topology, the checkpoint spout is automatically added by the topology builder. For stateful topologies,
+the topology builder wraps the `IStatefulBolt` in a `StatefulBoltExecutor`, which handles the state commits on receiving the checkpoint tuples.
+The non-stateful bolts are wrapped in a `CheckpointTupleForwarder`, which just forwards the checkpoint tuples so that the checkpoint tuples
+can flow through the topology DAG. The checkpoint tuples flow through a separate internal stream, namely `$checkpoint`. The topology builder
+wires the checkpoint stream across the whole topology with the checkpoint spout at the root.
+
+```
+              default                    default                default
+[spout1] ---------------> [statefulbolt1] ----------> [bolt1] --------------> [statefulbolt2]
+  |                                       ---------->          -------------->
+  |                                        ($chpt)               ($chpt)
+  |
+[$checkpointspout] _______| ($chpt)
+```
+
+At checkpoint intervals the checkpoint tuples are emitted by the checkpoint spout. On receiving a checkpoint tuple, the state of the bolt
+is saved and then the checkpoint tuple is forwarded to the next component. Each bolt waits for the checkpoint to arrive on all its input
+streams before it saves its state so that the state represents a consistent state across the topology. Once the checkpoint spout receives an
+ACK from all the bolts, the state commit is complete and the transaction is recorded as committed by the checkpoint spout.
+
+The state checkpointing does not currently checkpoint the state of the spout. However, once the state of all bolts is checkpointed, and once the checkpoint tuples are acked, the tuples emitted by the spout are also acked.
+This also implies that `topology.state.checkpoint.interval.ms` should be lower than `topology.message.timeout.secs`.
+
+The state commit works like a three-phase commit protocol with a prepare and commit phase, so that the state across the topology is saved
+in a consistent and atomic manner.
+
+### Recovery
+The recovery phase is triggered when the topology is started for the first time. If the previous transaction was not successfully
+prepared, a `rollback` message is sent across the topology so that if a bolt has some prepared transactions they can be discarded.
+If the previous transaction was prepared successfully but not committed, a `commit` message is sent across the topology so that
+the prepared transactions can be committed. After these steps are complete, the bolts are initialized with the state.
+
+The recovery is also triggered if one of the bolts fails to acknowledge the checkpoint message or, say, a worker crashes in
+the middle. Thus when the worker is restarted by the supervisor, the checkpoint mechanism makes sure that the bolt gets
+initialized with its previous state and the checkpointing continues from the point where it left off.
+
+### Guarantee
+Storm relies on the acking mechanism to replay tuples in case of failures. It is possible that the state is committed
+but the worker crashes before acking the tuples. In this case the tuples are replayed, causing duplicate state updates.
+Also, currently the StatefulBoltExecutor continues to process the tuples from a stream after it has received a checkpoint
+tuple on one stream while waiting for the checkpoint to arrive on the other input streams before saving the state. This can also cause
+duplicate state updates during recovery.
+
+The state abstraction does not eliminate duplicate evaluations and currently provides only an at-least-once guarantee.
+
+In order to provide the at-least-once guarantee, all bolts in a stateful topology are expected to anchor the tuples
+while emitting and ack the input tuples once they are processed. For non-stateful bolts, the anchoring/acking can be automatically
+managed by extending the `BaseBasicBolt`. Stateful bolts are expected to anchor tuples while emitting and ack the tuple
+after processing, like in the `WordCountBolt` example in the State management section above.
+
+### IStateful bolt hooks
+The IStateful bolt interface provides hook methods through which the stateful bolts can implement some custom actions.
+
+```java
+    /**
+     * This is a hook for the component to perform some actions just before the
+     * framework commits its state.
+     */
+    void preCommit(long txid);
+
+    /**
+     * This is a hook for the component to perform some actions just before the
+     * framework prepares its state.
+     */
+    void prePrepare(long txid);
+
+    /**
+     * This is a hook for the component to perform some actions just before the
+     * framework rolls back the prepared state.
+     */
+    void preRollback();
+```
+
+These are optional, and stateful bolts are not expected to provide any implementation. They are provided so that other
+system level components can be built on top of the stateful abstractions, where we might want to take some actions before the
+stateful bolt's state is prepared, committed or rolled back.
+
+## Providing custom state implementations
+Currently the only kind of `State` implementation supported is `KeyValueState`, which provides key-value mapping.
+
+Custom state implementations should provide implementations for the methods defined in the `org.apache.storm.State` interface.
+These are the `void prepareCommit(long txid)`, `void commit(long txid)` and `rollback()` methods. The `commit()` method is optional
+and is useful if the bolt manages the state on its own. It is currently used only by the internal system bolts
+(e.g. the CheckpointSpout) to save their state.
+
+A `KeyValueState` implementation should also implement the methods defined in the `org.apache.storm.state.KeyValueState` interface.
+
+### State provider
+The framework instantiates the state via the corresponding `StateProvider` implementation. A custom state should also provide
+a `StateProvider` implementation which can load and return the state based on the namespace. Each state belongs to a unique namespace.
+The namespace is typically unique per task so that each task can have its own state.
The StateProvider and the corresponding
+State implementation should be available in the class path of Storm (by placing them in the extlib directory).
+
+
+### Supported State Backends
+
+#### Redis
+
+* State provider class name (`topology.state.provider`)
+
+`org.apache.storm.redis.state.RedisKeyValueStateProvider`
+
+* Provider config (`topology.state.provider.config`)
+
+```
+ {
+   "keyClass": "Optional fully qualified class name of the Key type.",
+   "valueClass": "Optional fully qualified class name of the Value type.",
+   "keySerializerClass": "Optional Key serializer implementation class.",
+   "valueSerializerClass": "Optional Value Serializer implementation class.",
+   "jedisPoolConfig": {
+     "host": "localhost",
+     "port": 6379,
+     "timeout": 2000,
+     "database": 0,
+     "password": "xyz"
+   }
+ }
+ ```
+
+* Artifacts to add (`--artifacts`)
+
+`org.apache.storm:storm-redis:<storm-version>`
+
+#### HBase
+
+In order to make the state scalable, HBaseKeyValueState stores each state KV pair in its own row. This introduces a `non-atomic` commit phase and guarantees
+eventual consistency on the HBase side. This doesn't matter from the state's point of view, because HBaseKeyValueState can still provide the not-yet-committed value.
+Even if a worker crashes during the commit phase, after restart it will read the pending-commit states (which are stored atomically) from HBase, and the states will eventually be stored.
+
+NOTE: The HBase state provider uses a pre-created table and column family, so users need to create one and provide it in the provider config.
+
+You can simply create the table via `create 'state', 'cf'` in the `hbase shell`, but in production you may want to set some more properties.
+
+* State provider class name (`topology.state.provider`)
+
+`org.apache.storm.hbase.state.HBaseKeyValueStateProvider`
+
+* Provider config (`topology.state.provider.config`)
+
+```
+ {
+   "keyClass": "Optional fully qualified class name of the Key type.",
+   "valueClass": "Optional fully qualified class name of the Value type.",
+   "keySerializerClass": "Optional Key serializer implementation class.",
+   "valueSerializerClass": "Optional Value Serializer implementation class.",
+   "hbaseConfigKey": "config key to load hbase configuration from storm root configuration. (similar to storm-hbase)",
+   "tableName": "Pre-created table name for state.",
+   "columnFamily": "Pre-created column family for state."
+ }
+ ```
+
+If you want to initialize the HBase state provider from code, please see the example below:
+
+```
+Config conf = new Config();
+Map<String, Object> hbConf = new HashMap<>();
+hbConf.put("hbase.rootdir", "file:///tmp/hbase");
+conf.put("hbase.conf", hbConf);
+conf.put("topology.state.provider", "org.apache.storm.hbase.state.HBaseKeyValueStateProvider");
+conf.put("topology.state.provider.config", "{" +
+        "   \"hbaseConfigKey\": \"hbase.conf\"," +
+        "   \"tableName\": \"state\"," +
+        "   \"columnFamily\": \"cf\"" +
+        " }");
+```
+
+* Artifacts to add (`--artifacts`)
+
+`org.apache.storm:storm-hbase:<storm-version>`
\ No newline at end of file
diff --git a/docs/Storm-Scheduler.md b/docs/Storm-Scheduler.md
new file mode 100644
index 00000000000..4b9807ea293
--- /dev/null
+++ b/docs/Storm-Scheduler.md
@@ -0,0 +1,27 @@
+---
+title: Scheduler
+layout: documentation
+documentation: true
+---
+
+Storm now has 4 kinds of built-in schedulers: [DefaultScheduler]({{page.git-blob-base}}/storm-server/src/main/java/org/apache/storm/scheduler/DefaultScheduler.java), [IsolationScheduler]({{page.git-blob-base}}/storm-server/src/main/java/org/apache/storm/scheduler/IsolationScheduler.java), [MultitenantScheduler]({{page.git-blob-base}}/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/MultitenantScheduler.java), [ResourceAwareScheduler](Resource_Aware_Scheduler_overview.html).
+
+## Pluggable scheduler
+You can implement your own scheduler to replace the default scheduler to assign executors to workers. You configure the class to use via the "storm.scheduler" config in your storm.yaml, and your scheduler must implement the [IScheduler]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/scheduler/IScheduler.java) interface.
+
+## Isolation Scheduler
+The isolation scheduler makes it easy and safe to share a cluster among many topologies. The isolation scheduler lets you specify which topologies should be "isolated", meaning that they run on a dedicated set of machines within the cluster where no other topologies will be running. These isolated topologies are given priority on the cluster, so resources will be allocated to isolated topologies if there's competition with non-isolated topologies, and resources will be taken away from non-isolated topologies if necessary to get resources for an isolated topology. Once all isolated topologies are allocated, the remaining machines on the cluster are shared among all non-isolated topologies.
+
+You can configure the isolation scheduler in the Nimbus configuration by setting "storm.scheduler" to "org.apache.storm.scheduler.IsolationScheduler". Then, use the "isolation.scheduler.machines" config to specify how many machines each topology should get. This configuration is a map from topology name to the number of isolated machines allocated to this topology. For example:
+
+```
+isolation.scheduler.machines:
+    "my-topology": 8
+    "tiny-topology": 1
+    "some-other-topology": 3
+```
+
+Any topologies submitted to the cluster not listed there will not be isolated. Note that there is no way for a user of Storm to affect their isolation settings -- this is only allowed by the administrator of the cluster (this is very much intentional).
+
+The isolation scheduler solves the multi-tenancy problem -- avoiding resource contention between topologies -- by providing full isolation between topologies. The intention is that "productionized" topologies should be listed in the isolation config, and test or in-development topologies should not.
The remaining machines on the cluster serve the dual role of failover for isolated topologies and for running the non-isolated topologies.
+
diff --git a/docs/Storm-multi-language-protocol-(versions-0.7.0-and-below).md b/docs/Storm-multi-language-protocol-(versions-0.7.0-and-below).md
new file mode 100644
index 00000000000..1d4422f7b00
--- /dev/null
+++ b/docs/Storm-multi-language-protocol-(versions-0.7.0-and-below).md
@@ -0,0 +1,122 @@
+---
+layout: documentation
+---
+This page explains the multilang protocol for versions 0.7.0 and below. The protocol changed in version 0.7.1.
+
+# Storm Multi-Language Protocol
+
+## The ShellBolt
+
+Support for multiple languages is implemented via the ShellBolt class. This
+class implements the IBolt interface and implements the protocol for
+executing a script or program via the shell using Java's ProcessBuilder class.
+
+## Output fields
+
+Output fields are part of the Thrift definition of the topology. This means that to use multilang from Java, you need to create a bolt that extends ShellBolt, implements IRichBolt, and declares the fields in `declareOutputFields`.
+You can learn more about this on [Concepts](Concepts.html)
+
+## Protocol Preamble
+
+A simple protocol is implemented via the STDIN and STDOUT of the executed
+script or program. A mix of simple strings and JSON encoded data is exchanged
+with the process, making support possible for pretty much any language.
+
+# Packaging Your Stuff
+
+To run a ShellBolt on a cluster, the scripts that are shelled out to must be
+in the `resources/` directory within the jar submitted to the master.
+
+However, during development or testing on a local machine, the resources
+directory just needs to be on the classpath.
+
+## The Protocol
+
+Notes:
+* Both ends of this protocol use a line-reading mechanism, so be sure to
+trim off newlines from the input and to append them to your output.
+* All JSON inputs and outputs are terminated by a single line containing "end".
+* The bullet points below are written from the perspective of the script writer's
+STDIN and STDOUT.
+
+
+* Your script will be executed by the Bolt.
+* STDIN: A string representing a path. This is a PID directory.
+Your script should create an empty file named with its PID in this directory. e.g.
+the PID is 1234, so an empty file named 1234 is created in the directory. This
+file lets the supervisor know the PID so it can shut down the process later on.
+* STDOUT: Your PID. This is not JSON encoded, just a string. ShellBolt will log the PID to its log.
+* STDIN: (JSON) The Storm configuration. Various settings and properties.
+* STDIN: (JSON) The Topology context
+* The rest happens in a while(true) loop
+* STDIN: A tuple! This is a JSON encoded structure like this:
+
+```
+{
+    // The tuple's id
+    "id": -6955786537413359385,
+    // The id of the component that created this tuple
+    "comp": 1,
+    // The id of the stream this tuple was emitted to
+    "stream": 1,
+    // The id of the task that created this tuple
+    "task": 9,
+    // All the values in this tuple
+    "tuple": ["snow white and the seven dwarfs", "field2", 3]
+}
+```
+
+* STDOUT: The results of your bolt, JSON encoded. This can be a sequence of acks, fails, emits, and/or logs. Emits look like:
+
+```
+{
+    "command": "emit",
+    // The ids of the tuples this output tuple should be anchored to
+    "anchors": [1231231, -234234234],
+    // The id of the stream this tuple was emitted to. Leave this empty to emit to the default stream.
+    "stream": 1,
+    // If doing an emit direct, indicate the task to send the tuple to
+    "task": 9,
+    // All the values in this tuple
+    "tuple": ["field1", 2, 3]
+}
+```
+
+An ack looks like:
+
+```
+{
+    "command": "ack",
+    // the id of the tuple to ack
+    "id": 123123
+}
+```
+
+A fail looks like:
+
+```
+{
+    "command": "fail",
+    // the id of the tuple to fail
+    "id": 123123
+}
+```
+
+A "log" will log a message in the worker log. It looks like:
+
+```
+{
+    "command": "log",
+    // the message to log
+    "msg": "hello world!"
+}
+```
+
+* STDOUT: emit "sync" as a single line by itself when the bolt has finished emitting/acking/failing and is ready for the next input
+
+### sync
+
+Note: This command is not JSON encoded; it is sent as a simple string.
+
+This lets the parent bolt know that the script has finished processing and is ready for another tuple.
diff --git a/docs/Stream-API.md b/docs/Stream-API.md
new file mode 100644
index 00000000000..06e3e367e2a
--- /dev/null
+++ b/docs/Stream-API.md
@@ -0,0 +1,491 @@
+---
+title: Stream API Overview
+layout: documentation
+documentation: true
+---
+
+* [Concepts](#concepts)
+    * [Stream Builder](#streambuilder)
+    * [Value mapper](#valuemapper)
+* [Stream APIs](#streamapis)
+    * [Basic transformations](#basictransformations)
+        * [filter](#filter)
+        * [map](#map)
+        * [flatmap](#flatmap)
+    * [Windowing](#windowing)
+    * [Transformation to key-value pairs](#keyvaluepairs)
+        * [mapToPair](#mapflatmaptopair)
+        * [flatMapToPair](#mapflatmaptopair)
+    * [Aggregations](#aggregations)
+        * [aggregate](#aggregatereduce)
+        * [reduce](#aggregatereduce)
+        * [aggregateByKey](#aggregatereducebykey)
+        * [reduceByKey](#aggregatereducebykey)
+        * [groupByKey](#groupbykey)
+        * [countByKey](#countbykey)
+    * [Repartition](#repartition)
+    * [Output operations](#outputoperations)
+        * [print](#print)
+        * [peek](#peek)
+        * [forEach](#foreach)
+        * [to](#to)
+    * [Branch](#branching)
+    * [Joins](#joins)
+    * [CoGroupByKey](#cogroupbykey)
+    * [State](#state)
+        * [updateStateByKey](#updatestatebykey)
+        * [stateQuery](#statequery)
+* [Guarantees](#guarantees)
+* [Example](#example)
+
+Historically Storm provided Spout and Bolt APIs for expressing streaming computations. Though these APIs are fairly simple to use,
+there are no reusable constructs for expressing common streaming operations like filtering, transformations, windowing, joins,
+aggregations and so on.
+
+The Stream APIs build on top of Storm's spouts and bolts to provide a typed API for expressing streaming computations and support functional style operations such as map-reduce.
+
+# Concepts
+
+Conceptually a `Stream` can be thought of as a stream of messages flowing through a pipeline. A `Stream` may be generated by reading messages out of a source like a spout, or by transforming other streams. For example,
+
+```java
+// imports
+import org.apache.storm.streams.Stream;
+import org.apache.storm.streams.StreamBuilder;
+...
+
+StreamBuilder builder = new StreamBuilder();
+
+// a stream of sentences obtained from a source spout
+Stream<String> sentences = builder.newStream(new RandomSentenceSpout()).map(tuple -> tuple.getString(0));
+
+// a stream of words obtained by transforming (splitting) the stream of sentences
+Stream<String> words = sentences.flatMap(s -> Arrays.asList(s.split(" ")));
+
+// output operation that prints the words to console
+words.forEach(w -> System.out.println(w));
+```
+
+
+Most stream operations accept parameters that describe user-specified behavior, typically via lambda expressions like `s -> Arrays.asList(s.split(" "))` as in the above example.
+
+A `Stream` supports two kinds of operations,
+
+1. **Transformations** that produce another stream from the current stream (like the `flatMap` operation in the example above)
+1. **Output operations** that produce a result (like the `forEach` operation in the example above).
+
+## Stream Builder
+
+`StreamBuilder` provides the builder APIs to create a new stream. Typically a spout forms the source of a stream.
+
+```java
+StreamBuilder builder = new StreamBuilder();
+Stream<Tuple> sentences = builder.newStream(new TestSentenceSpout());
+```
+
+The `StreamBuilder` tracks the overall pipeline of operations expressed via the Stream. One can then create the Storm topology
+via `build()` and submit it like a normal storm topology via `StormSubmitter`.
+
+```java
+StormSubmitter.submitTopologyWithProgressBar("test", new Config(), streamBuilder.build());
+```
+
+## Value mapper
+
+Value mappers can be used to extract specific fields from the tuples emitted from a spout to produce a typed stream of values. Value mappers are passed as arguments to `StreamBuilder.newStream`.
+
+```java
+StreamBuilder builder = new StreamBuilder();
+
+// extract the first field from the tuple to get a Stream of sentences
+Stream<String> sentences = builder.newStream(new TestWordSpout(), new ValueMapper<String>(0));
+```
+
+Storm provides strongly typed tuples via the `Pair` and `Tuple` classes (`Tuple3` up to `Tuple10`). One can use a `TupleValueMapper` to produce a stream of typed tuples as shown below.
+
+```java
+// extract the first three fields of the tuple emitted by the spout to produce a stream of typed tuples.
+Stream<Tuple3<String, Integer, Long>> stream = builder.newStream(new TestSpout(), TupleValueMappers.of(0, 1, 2));
+```
+
+# Stream APIs
+
+Storm's streaming APIs (defined in [Stream](../storm-client/src/jvm/org/apache/storm/streams/Stream.java) and [PairStream](../storm-client/src/jvm/org/apache/storm/streams/PairStream.java)) currently support a wide range of operations such as transformations, filters, windowing, aggregations, branching, joins, stateful, output and debugging operations.
+
+## Basic transformations
+
+### filter
+
+`filter` returns a stream consisting of the elements of the stream that match the given `Predicate` (for which the predicate returns true).
+
+```java
+Stream<String> logs = ...
+Stream<String> errors = logs.filter(line -> line.contains("ERROR"));
+```
+
+In the above example log lines with 'ERROR' are filtered into an error stream which can then be further processed.
+
+### map
+
+`map` returns a stream consisting of the result of applying the given mapping function to the values of the stream.
+
+```java
+Stream<String> words = ...
+Stream<Integer> wordLengths = words.map(String::length);
+```
+
+The example generates a stream of word lengths from a stream of words by applying the String.length function on each value.
Note that the type of the resultant stream of a map operation can be different from that of the original stream.
+
+### flatMap
+
+`flatMap` returns a stream consisting of the results of replacing each value of the stream with the contents produced by applying the provided mapping function to each value. This is similar to map but each value can be mapped to 0 or more values.
+
+```java
+Stream<String> sentences = ...
+Stream<String> words = sentences.flatMap(s -> Arrays.asList(s.split(" ")));
+```
+
+
+In the above example, the lambda function splits each value in the stream into a list of words and the flatMap function generates a flattened stream of words out of it.
+
+## Windowing
+
+A `window` operation produces a windowed stream consisting of the elements that fall within the window as specified by the window parameter. All the windowing options supported in the underlying windowed bolts are supported via the Stream APIs.
+
+`Stream<T> windowedStream = stream.window(Window<?, ?> windowConfig);`
+
+The windowConfig parameter specifies the windowing config like sliding or tumbling windows based on time duration or event count.
+
+```java
+// time based sliding window
+stream.window(SlidingWindows.of(Duration.minutes(10), Duration.minutes(1)));
+
+// count based sliding window
+stream.window(SlidingWindows.of(Count.of(10), Count.of(2)));
+
+// tumbling window
+stream.window(TumblingWindows.of(Duration.seconds(10)));
+
+// specifying timestamp field for event time based processing and a late tuple stream.
+stream.window(TumblingWindows.of(Duration.seconds(10))
+              .withTimestampField("ts")
+              .withLateTupleStream("late_events"));
+```
+
+A windowing operation splits the continuous stream of values into subsets and is necessary for performing operations like Joins and Aggregations.
+
+## Transformation to key-value pairs
+
+### mapToPair and flatMapToPair
+
+These operations transform a Stream of values into a stream of key-value pairs.
+
+```java
+Stream<Integer> integers = … // 1, 2, 3, 4, ...
+PairStream<Integer, Integer> squares = integers.mapToPair(x -> Pair.of(x, x*x)); // (1, 1), (2, 4), (3, 9), (4, 16), ...
+```
+
+A key-value pair stream is required for operations like groupByKey, aggregateByKey, joins etc.
+
+## Aggregations
+
+Aggregate operations aggregate the values (or key-values) in a stream. Typically the aggregation operations are performed on a windowed stream, where the aggregate results are emitted on each window activation.
+
+### aggregate and reduce
+
+`aggregate` and `reduce` compute a global aggregation, i.e. the values across all partitions are forwarded to a single task for computing the aggregate.
+
+```java
+Stream<Long> numbers = …
+// aggregate the numbers and produce a stream of last 10 sec sums.
+Stream<Long> sums = numbers.window(TumblingWindows.of(Duration.seconds(10))).aggregate(new Sum());
+
+// the last 10 sec sums computed using reduce
+Stream<Long> sums = numbers.window(...).reduce((x, y) -> x + y);
+```
+
+`aggregate` and `reduce` differ in the way in which the aggregate results are computed.
+
+A `reduce` operation repeatedly applies the given reducer and reduces two values to a single value until there is only one value left. This may not be feasible or easy for all kinds of aggregations (e.g. avg).
+
+An `aggregate` operation does a mutable reduction. A mutable reduction accumulates results into an accumulator as it processes the values.
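+
+For instance, an average cannot be computed by a pairwise reduction alone, but it fits the mutable reduction model naturally: the accumulator can carry a running sum and count, and the result is extracted at the end. Below is a hedged sketch of a hypothetical `Avg` aggregator (not part of Storm; the class name is illustrative), written against the same `CombinerAggregator` interface that the `Sum` example below implements.
+
+```java
+// Sketch: computes an average via mutable reduction. The accumulator is a
+// two-element array holding the running sum ([0]) and the running count ([1]).
+public class Avg implements CombinerAggregator<Long, long[], Double> {
+
+    // The initial (empty) accumulator: sum = 0, count = 0
+    @Override
+    public long[] init() {
+        return new long[]{0L, 0L};
+    }
+
+    // Folds one value into the accumulator
+    @Override
+    public long[] apply(long[] acc, Long value) {
+        return new long[]{acc[0] + value, acc[1] + 1};
+    }
+
+    // Merges the partial (sum, count) accumulators computed on different partitions
+    @Override
+    public long[] merge(long[] acc1, long[] acc2) {
+        return new long[]{acc1[0] + acc2[0], acc1[1] + acc2[1]};
+    }
+
+    // Extracts the final average from the accumulator
+    @Override
+    public Double result(long[] acc) {
+        return acc[1] == 0 ? 0.0 : (double) acc[0] / acc[1];
+    }
+}
+```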
+
+The aggregation operations (aggregate and reduce) automatically do a local aggregation whenever possible before doing the network shuffle, to minimize the number of messages transmitted over the network. For example, to compute a sum, a per-partition partial sum is computed and only the partial sums are transferred over the network to the target bolt, where the partial sums are merged to produce the final sum. A `CombinerAggregator` interface is used as the argument of `aggregate` to enable this.
+
+For example the `Sum` (passed as the argument of aggregate in the example above) can be implemented as a `CombinerAggregator` as follows.
+
+```java
+public class Sum implements CombinerAggregator<Long, Long, Long> {
+
+    // The initial value of the sum
+    @Override
+    public Long init() {
+        return 0L;
+    }
+
+    // Updates the sum by adding the value (this could be a partial sum)
+    @Override
+    public Long apply(Long aggregate, Long value) {
+        return aggregate + value;
+    }
+
+    // merges the partial sums
+    @Override
+    public Long merge(Long accum1, Long accum2) {
+        return accum1 + accum2;
+    }
+
+    // extract result from the accumulator (here the accumulator and result is the same)
+    @Override
+    public Long result(Long accum) {
+        return accum;
+    }
+}
+```
+
+### aggregateByKey and reduceByKey
+
+These are similar to the aggregate and reduce operations but do the aggregation per key.
+
+`aggregateByKey` aggregates the values for each key of the stream using the given Aggregator.
+
+```java
+Stream<String> words = ...                                                // a windowed stream of words
+PairStream<String, Long> wordCounts = words.mapToPair(w -> Pair.of(w, 1)) // convert to a stream of (word, 1) pairs
+                                           .aggregateByKey(new Count<>()); // compute counts per word
+```
+
+`reduceByKey` performs a reduction on the values for each key of this stream by repeatedly applying the reducer.
+
+```java
+Stream<String> words = ...                                                   // a windowed stream of words
+PairStream<String, Integer> wordCounts = words.mapToPair(w -> Pair.of(w, 1)) // convert to a stream of (word, 1) pairs
+                                              .reduceByKey((x, y) -> x + y); // compute counts per word
+```
+
+
+Like the global aggregate/reduce, a per-partition local aggregate (per key) is computed and the partial results are sent to the target bolts, where the partial results are merged to produce the final aggregate.
+
+### groupByKey
+
+`groupByKey` on a stream of key-value pairs returns a new stream where the values are grouped by the keys.
+
+```java
+// a stream of (user, score) pairs e.g. ("alice", 10), ("bob", 15), ("bob", 20), ("alice", 11), ("alice", 13)
+PairStream<String, Integer> scores = ...
+
+// list of scores per user in the last window, e.g. ("alice", [10, 11, 13]), ("bob", [15, 20])
+PairStream<String, Iterable<Integer>> userScores = scores.window(...).groupByKey();
+```
+
+### countByKey
+`countByKey` counts the values for each key of this stream.
+
+```java
+Stream<String> words = ...                                                // a windowed stream of words
+PairStream<String, Long> wordCounts = words.mapToPair(w -> Pair.of(w, 1)) // convert to a stream of (word, 1) pairs
+                                           .countByKey(); // compute counts per word
+```
+
+Internally `countByKey` uses `aggregateByKey` to compute the count.
+
+## Repartition
+
+A `repartition` operation re-partitions the current stream and returns a new stream with the specified number of partitions. Further operations on the resultant stream would execute at that level of parallelism. Repartition can be used to increase or reduce the parallelism of the operations in the stream.
+
+The initial number of partitions can also be specified while creating the stream (via `StreamBuilder.newStream`).
+
+```java
+// Stream 's1' will have 2 partitions and operations on s1 will execute at this level of parallelism
+Stream<String> s1 = builder.newStream(new TestWordSpout(), new ValueMapper<String>(0), 2);
+
+// Stream 's2' and further operations will have three partitions
+Stream<String> s2 = s1.map(function1).repartition(3);
+
+// perform a map operation on s2 and print the result
+s2.map(function2).print();
+```
+
+Note: a `repartition` operation implies a network transfer. In the above example the first map operation (function1) would be executed at a parallelism of 2 (on two partitions of s1), whereas the second map operation (function2) would be executed at a parallelism of 3 (on three partitions of s2). This also means that the first and second map operations have to be executed on two separate bolts and involve a network transfer.
+
+## Output operations
+
+Output operations push out the transformed values in the stream to the console, external sinks like databases, files, or even Storm bolts.
+
+### print
+
+`print` prints the values in the stream to the console. For example,
+
+```java
+// transforms words to uppercase and prints to the console
+words.map(String::toUpperCase).print();
+```
+
+### peek
+
+`peek` returns a stream consisting of the elements of the stream, additionally performing the provided action on each element as they are consumed from the resulting stream. This can be used to 'inspect' the values flowing at any stage in a stream.
+
+```java
+builder.newStream(...).flatMap(s -> Arrays.asList(s.split(" ")))
+       // print the results of the flatMap operation as the values flow across the stream.
+       .peek(s -> System.out.println(s))
+       .mapToPair(w -> new Pair<>(w, 1))
+```
+
+### forEach
+
+This is the most generic output operation and can be used to execute arbitrary code for each value in the stream, like storing the results into an external database, file and so on.
+
+```java
+stream.forEach(value -> {
+        // log it
+        LOG.debug(value);
+        // store the value into a db and so on...
+        statement.executeUpdate(..);
+    }
+);
+```
+
+### to
+
+This allows one to plug in existing bolts as sinks.
+
+```java
+// The redisBolt is a standard storm bolt
+IRichBolt redisBolt = new RedisStoreBolt(poolConfig, storeMapper);
+...
+// generate the word counts and store them in redis using the redis bolt
+builder.newStream(new TestWordSpout(), new ValueMapper<String>(0))
+       .mapToPair(w -> Pair.of(w, 1))
+       .countByKey()
+       // the (word, count) pairs are forwarded to the redisBolt which stores them in redis
+       .to(redisBolt);
+```
+
+Note that this will provide guarantees only based on what the bolt provides.
+
+## Branch
+
+A `branch` operation can be used to express If-then-else logic on streams.
+
+```java
+Stream<T>[] streams = stream.branch(Predicate<T>... predicates)
+```
+
+The predicates are applied in the given order to the values of the stream and the result is forwarded to the corresponding (index based) result stream based on the first predicate that matches. If none of the predicates match a value, that value is dropped.
+
+For example,
+
+```java
+Stream<Integer>[] streams = builder.newStream(new RandomIntegerSpout(), new ValueMapper<Integer>(0))
+                                   .branch(x -> (x % 2) == 0,
+                                           x -> (x % 2) == 1);
+Stream<Integer> evenNumbers = streams[0];
+Stream<Integer> oddNumbers = streams[1];
+```
+
+## Joins
+
+A `join` operation joins the values of one stream with the values having the same key from another stream.
+
+```java
+PairStream<Integer, Integer> squares = … // (1, 1), (2, 4), (3, 9) ...
+PairStream<Integer, Integer> cubes = … // (1, 1), (2, 8), (3, 27) ...
+
+// join the squares and cubes stream to produce (1, [1, 1]), (2, [4, 8]), (3, [9, 27]) ...
+PairStream<Integer, Pair<Integer, Integer>> joined = squares.window(TumblingWindows.of(Duration.seconds(5))).join(cubes);
+```
+
+Joins are typically invoked on a windowed stream, joining the key-values that arrived on each stream in the current window. The parallelism of the stream on which the join is invoked is carried forward to the joined stream. An optional `ValueJoiner` can be passed as an argument to join to specify how to join the two values for each matching key (the default behavior is to return a `Pair` of the values from both streams).
+
+Left, right and full outer joins are supported.
+
+## CoGroupByKey
+
+`coGroupByKey` groups the values of this stream with the values having the same key from the other stream.
+
+```java
+// a stream of (key, value) pairs e.g. (k1, v1), (k2, v2), (k2, v3)
+PairStream<String, String> stream1 = ...
+
+// another stream of (key, value) pairs e.g. (k1, x1), (k1, x2), (k3, x3)
+PairStream<String, String> stream2 = ...
+
+// the co-grouped values per key in the last window, e.g. (k1, ([v1], [x1, x2]), (k2, ([v2, v3], [])), (k3, ([], [x3]))
+PairStream<String, Pair<Iterable<String>, Iterable<String>>> coGroupedStream = stream1.window(...).coGroupByKey(stream2);
+```
+
+## State
+
+Storm provides APIs for applications to save and update the state of their computation and also to query the state.
+
+### updateStateByKey
+
+`updateStateByKey` updates the state by applying a given state update function to the previous state and the new value for the key. `updateStateByKey` can be invoked with either an initial value for the state and a state update function, or by directly providing a `StateUpdater` implementation.
+
+```java
+PairStream<String, Long> wordCounts = ...
+// Update the word counts in the state; here the first argument 0L is the initial value for the state and
+// the second argument is a function that adds the count to the current value in the state.
+StreamState<String, Long> streamState = wordCounts.updateStateByKey(0L, (state, count) -> state + count);
+streamState.toPairStream().print();
+```
+
+The state value can be of any type. In the above example it's of type `Long` and stores the word count.
+
+Internally storm uses stateful bolts for storing the state. The Storm config `topology.state.provider` can be used to choose the state provider implementation. For example, set this to `org.apache.storm.redis.state.RedisKeyValueStateProvider` for a redis based state store.
+
+### stateQuery
+
+`stateQuery` can be used to query the state (updated by `updateStateByKey`). The `StreamState` returned by the updateStateByKey operation has to be used for querying stream state. The values in the stream are used as the keys to query the state.
+
+```java
+
+// The stream of words emitted by the QuerySpout is used as the keys to query the state.
+builder.newStream(new QuerySpout(), new ValueMapper<String>(0))
+// Queries the state and emits the matching (key, value) as results.
+// The stream state returned by updateStateByKey is passed as the argument to stateQuery.
+.stateQuery(streamState).print();
+```
+
+# Guarantees
+
+Right now the topologies built using the Stream API provide an **at-least once** guarantee.
+
+Note that only the `updateStateByKey` operation currently executes on an underlying StatefulBolt. The other stateful operations (join, windowing, aggregation, etc.) execute on an IRichBolt and store their state in memory.
They rely on Storm's acking and replay mechanisms to rebuild the state.
+
+In the future the underlying framework of the Stream API will be enhanced to provide **exactly once** guarantees.
+
+# Example
+
+Here's a word count topology expressed using the Stream API,
+
+```java
+StreamBuilder builder = new StreamBuilder();
+
+builder
+    // A stream of random sentences with two partitions
+    .newStream(new RandomSentenceSpout(), new ValueMapper<String>(0), 2)
+    // a two seconds tumbling window
+    .window(TumblingWindows.of(Duration.seconds(2)))
+    // split the sentences to words
+    .flatMap(s -> Arrays.asList(s.split(" ")))
+    // create a stream of (word, 1) pairs
+    .mapToPair(w -> Pair.of(w, 1))
+    // compute the word counts in the last two second window
+    .countByKey()
+    // print the results to stdout
+    .print();
+```
+
+The `RandomSentenceSpout` is a regular Storm spout that continuously emits random sentences. The stream of sentences is split into two-second windows and the word count within each window is computed and printed.
+
+The stream can then be submitted just like a regular topology as shown below.
+
+```java
+Config config = new Config();
+config.setNumWorkers(1);
+StormSubmitter.submitTopologyWithProgressBar("topology-name", config, builder.build());
+```
+
+More examples are available under [storm-starter](../examples/storm-starter/src/jvm/org/apache/storm/starter/streams) which will help you get started.
diff --git a/docs/Structure-of-the-codebase.md b/docs/Structure-of-the-codebase.md
new file mode 100644
index 00000000000..c5804ee0d36
--- /dev/null
+++ b/docs/Structure-of-the-codebase.md
@@ -0,0 +1,129 @@
+---
+title: Structure of the Codebase
+layout: documentation
+documentation: true
+---
+There are three distinct layers to Storm's codebase.
+
+First, Storm was designed from the very beginning to be compatible with multiple languages. Nimbus is a Thrift service and topologies are defined as Thrift structures. The usage of Thrift allows Storm to be used from any language.
+
+Second, all of Storm's interfaces are specified as Java interfaces. This means that every feature of Storm is always available via Java.
+
+Third, there is the implementation itself, which is largely in Java with some remaining Clojure code.
+
+The following sections explain each of these layers in more detail.
+
+### storm.thrift
+
+The first place to look to understand the structure of Storm's codebase is the [storm.thrift]({{page.git-blob-base}}/storm-client/src/storm.thrift) file.
+
+Every spout or bolt in a topology is given a user-specified identifier called the "component id". The component id is used to specify subscriptions from a bolt to the output streams of other spouts or bolts. A [StormTopology]({{page.git-blob-base}}/storm-client/src/storm.thrift) structure contains a map from component id to component for each type of component (spouts and bolts).
+
+Spouts and bolts have the same Thrift definition, so let's just take a look at the [Thrift definition for bolts]({{page.git-blob-base}}/storm-client/src/storm.thrift). It contains a `ComponentObject` struct and a `ComponentCommon` struct.
+
+The `ComponentObject` defines the implementation for the bolt. It can be one of three types:
+
+1. A serialized java object (that implements [IBolt]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/task/IBolt.java))
+2. A `ShellComponent` object that indicates the implementation is in another language.
Specifying a bolt this way will cause Storm to instantiate a [ShellBolt]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/task/ShellBolt.java) object to handle the communication between the JVM-based worker process and the non-JVM-based implementation of the component.
+3. A `JavaObject` structure which tells Storm the classname and constructor arguments to use to instantiate that bolt. This is useful if you want to define a topology in a non-JVM language. This way, you can make use of JVM-based spouts and bolts without having to create and serialize a Java object yourself.
+
+`ComponentCommon` defines everything else for this component. This includes:
+
+1. What streams this component emits and the metadata for each stream (whether it's a direct stream, the fields declaration)
+2. What streams this component consumes (specified as a map from component_id:stream_id to the stream grouping to use)
+3. The parallelism for this component
+4. The component-specific [configuration](Configuration.html) for this component
+
+Note that the Thrift structure for spouts also has a `ComponentCommon` field, and so spouts can also have declarations to consume other input streams. Yet the Storm Java API does not provide a way for spouts to consume other streams, and if you put any input declarations there for a spout you would get an error when you tried to submit the topology. The reason that spouts have an input declarations field is not for users to use, but for Storm itself to use. Storm adds implicit streams and bolts to the topology to set up the [acking framework](Acking-framework-implementation.html), and two of these implicit streams are from the acker bolt to each spout in the topology. The acker sends "ack" or "fail" messages along these streams whenever a tuple tree is detected to be completed or failed. The code that transforms the user's topology into the runtime topology is located [here]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/daemon/StormCommon.java).
+
+### Java interfaces
+
+The interfaces for Storm are generally specified as Java interfaces. The main interfaces are:
+
+1. [IRichBolt](javadocs/org/apache/storm/topology/IRichBolt.html)
+2. [IRichSpout](javadocs/org/apache/storm/topology/IRichSpout.html)
+3. [TopologyBuilder](javadocs/org/apache/storm/topology/TopologyBuilder.html)
+
+The strategy for the majority of the interfaces is to:
+
+1. Specify the interface using a Java interface
+2. Provide a base class that provides default implementations when appropriate
+
+You can see this strategy at work with the [BaseRichSpout](javadocs/org/apache/storm/topology/base/BaseRichSpout.html) class.
+
+Spouts and bolts are serialized into the Thrift definition of the topology as described above.
+
+One subtle aspect of the interfaces is the difference between `IBolt` and `ISpout` vs. `IRichBolt` and `IRichSpout`. The main difference between them is the addition of the `declareOutputFields` method in the "Rich" versions of the interfaces. The reason for the split is that the output fields declaration for each output stream needs to be part of the Thrift struct (so it can be specified from any language), but as a user you want to be able to declare the streams as part of your class. What `TopologyBuilder` does when constructing the Thrift representation is call `declareOutputFields` to get the declaration and convert it into the Thrift structure. The conversion happens in the `TopologyBuilder` code.
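+
+To make the split concrete, here is a hedged sketch of a hypothetical rich bolt (the class name, field access and stream name are illustrative, not taken from the codebase). The `prepare`/`execute` logic is what the plain `IBolt` interface covers, while `declareOutputFields` is the addition that makes the bolt "rich" and gives `TopologyBuilder` the declaration it serializes into Thrift:
+
+```java
+import java.util.Map;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+
+// Sketch: a bolt that doubles the first field of each input tuple.
+public class DoublerBolt extends BaseRichBolt {
+    private OutputCollector collector;
+
+    @Override
+    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
+        this.collector = collector;
+    }
+
+    // Core processing logic -- this part is covered by the plain IBolt interface
+    @Override
+    public void execute(Tuple input) {
+        collector.emit(input, new Values(input.getInteger(0) * 2));
+        collector.ack(input);
+    }
+
+    // The "rich" addition: TopologyBuilder calls this method to obtain the
+    // output stream declaration and converts it into the Thrift structure.
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("doubled"));
+    }
+}
+```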
+
+
+### Implementation
+
+Specifying all the functionality via Java interfaces ensures that every feature of Storm is available via Java. Moreover, the focus on Java interfaces ensures that the user experience from Java-land is pleasant as well.
+
+Storm was originally implemented in Clojure, but most of the code has since been ported to Java.
+
+Here's a summary of the purpose of the main Java packages:
+
+#### Java packages
+
+[org.apache.storm.coordination]({{page.git-tree-base}}/storm-client/src/jvm/org/apache/storm/coordination): Implements the pieces required to coordinate batch-processing on top of Storm, which DRPC uses. `CoordinatedBolt` is the most important class here.
+
+[org.apache.storm.drpc]({{page.git-tree-base}}/storm-client/src/jvm/org/apache/storm/drpc): Implementation of the DRPC higher level abstraction
+
+[org.apache.storm.generated]({{page.git-tree-base}}/storm-client/src/jvm/org/apache/storm/generated): The generated Thrift code for Storm.
+
+[org.apache.storm.grouping]({{page.git-tree-base}}/storm-client/src/jvm/org/apache/storm/grouping): Contains the interfaces for making custom stream groupings
+
+[org.apache.storm.hooks]({{page.git-tree-base}}/storm-client/src/jvm/org/apache/storm/hooks): Interfaces for hooking into various events in Storm, such as when tasks emit tuples, when tuples are acked, etc. The user guide for hooks is [here](Hooks.html).
+
+[org.apache.storm.serialization]({{page.git-tree-base}}/storm-client/src/jvm/org/apache/storm/serialization): Implementation of how Storm serializes/deserializes tuples. Built on top of [Kryo](https://github.com/EsotericSoftware/kryo).
+
+[org.apache.storm.spout]({{page.git-tree-base}}/storm-client/src/jvm/org/apache/storm/spout): Definition of spout and associated interfaces (like the `SpoutOutputCollector`). Also contains `ShellSpout` which implements the protocol for defining spouts in non-JVM languages.
+
+[org.apache.storm.task]({{page.git-tree-base}}/storm-client/src/jvm/org/apache/storm/task): Definition of bolt and associated interfaces (like `OutputCollector`). Also contains `ShellBolt` which implements the protocol for defining bolts in non-JVM languages. Finally, `TopologyContext` is defined here as well, which is provided to spouts and bolts so they can get data about the topology and its execution at runtime.
+
+[org.apache.storm.testing]({{page.git-tree-base}}/storm-client/src/jvm/org/apache/storm/testing): Contains a variety of test bolts and utilities used in Storm's unit tests.
+
+[org.apache.storm.topology]({{page.git-tree-base}}/storm-client/src/jvm/org/apache/storm/topology): Java layer over the underlying Thrift structure to provide a clean, pure-Java API to Storm (users don't have to know about Thrift). `TopologyBuilder` is here as well as the helpful base classes for the different spouts and bolts. The slightly-higher level `IBasicBolt` interface is here, which is a simpler way to write certain kinds of bolts.
+
+[org.apache.storm.tuple]({{page.git-tree-base}}/storm-client/src/jvm/org/apache/storm/tuple): Implementation of Storm's tuple data model.
+
+[org.apache.storm.utils]({{page.git-tree-base}}/storm-client/src/jvm/org/apache/storm/utils): Data structures and miscellaneous utilities used throughout the codebase. This includes utilities for time simulation.
+
+[org.apache.storm.command.*]({{page.git-blob-base}}/storm-core/src/jvm/org/apache/storm/command): These implement various commands for the `storm` command line client. These implementations are very short.
+
+[org.apache.storm.cluster]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/cluster): This code manages how cluster state (like what tasks are running where, what spout/bolt each task runs as) is stored, typically in Zookeeper.
+
+[org.apache.storm.daemon.Acker]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/daemon/Acker.java): Implementation of the "acker" bolt, which is a key part of how Storm guarantees data processing.
+
+[org.apache.storm.daemon.DrpcServer]({{page.git-blob-base}}/storm-webapp/src/jvm/org/apache/storm/daemon/DrpcServer.java): Implementation of the DRPC server for use with DRPC topologies.
+
+[org.apache.storm.event]({{page.git-blob-base}}/storm-server/src/jvm/org/apache/storm/event): Implements a simple asynchronous function executor. Used in various places in Nimbus and Supervisor to make functions execute in serial to avoid any race conditions.
+
+[org.apache.storm.LocalCluster]({{page.git-blob-base}}/storm-server/src/main/java/org/apache/storm/LocalCluster.java): Utility to boot up Storm inside an existing Java process. Often used in conjunction with `Testing.java` to implement integration tests.
+
+[org.apache.storm.messaging.*]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/messaging): Defines a higher level interface for implementing point-to-point messaging. In local mode Storm uses in-memory Java queues to do this; on a cluster, it uses Netty, but it is pluggable.
+
+[org.apache.storm.stats]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/stats): Implementation of stats rollup routines used when sending stats to ZK for use by the UI. Does things like windowed and rolling aggregations at multiple granularities.
+
+[org.apache.storm.Thrift]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/Thrift.java): Wrappers around the generated Thrift API to make working with Thrift structures more pleasant.
+
+[org.apache.storm.StormTimer]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/StormTimer.java): Implementation of a background timer to execute functions in the future or on a recurring interval. Storm couldn't use the [Timer](http://docs.oracle.com/javase/1.4.2/docs/api/java/util/Timer.html) class because it needed integration with time simulation in order to be able to unit test Nimbus and the Supervisor.
+
+[org.apache.storm.daemon.nimbus]({{page.git-blob-base}}/storm-server/src/jvm/org/apache/storm/daemon/nimbus/Nimbus.java): Implementation of Nimbus.
+
+[org.apache.storm.daemon.supervisor]({{page.git-blob-base}}/storm-server/src/jvm/org/apache/storm/daemon/supervisor/Supervisor.java): Implementation of the Supervisor.
+
+[org.apache.storm.daemon.task]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/daemon/Task.java): Implementation of an individual task for a spout or bolt. Handles message routing, serialization, stats collection for the UI, as well as the spout-specific and bolt-specific execution implementations.
+
+[org.apache.storm.daemon.worker]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/daemon/worker/Worker.java): Implementation of a worker process (which will contain many tasks within). Implements message transferring and task launching.
+
+[org.apache.storm.Testing]({{page.git-blob-base}}/storm-server/src/main/java/org/apache/storm/Testing.java): Various utilities for working with local clusters during tests, e.g.
`completeTopology` for running a fixed set of tuples through a topology for capturing the output, tracker topologies for having fine-grained control over detecting when a cluster is "idle", and other utilities. + +#### Clojure namespaces + +[org.apache.storm.clojure]({{page.git-blob-base}}/storm-clojure/src/clj/org/apache/storm/clojure.clj): Implementation of the Clojure DSL for Storm. + +[org.apache.storm.config]({{page.git-blob-base}}/storm-clojure/src/clj/org/apache/storm/config.clj): Creates Clojure symbols for the config names in [Config.java](javadocs/org/apache/storm/Config.html). + +[org.apache.storm.log]({{page.git-blob-base}}/storm-clojure/src/clj/org/apache/storm/log.clj): Defines the functions used to log messages to log4j. + +[org.apache.storm.ui.*]({{page.git-blob-base}}/storm-core/src/clj/org/apache/storm/ui): Implementation of the Storm UI. Completely independent from the rest of the codebase and uses the Nimbus Thrift API to get data. diff --git a/docs/Support-for-non-java-languages.md b/docs/Support-for-non-java-languages.md new file mode 100644 index 00000000000..d03dcadcc3b --- /dev/null +++ b/docs/Support-for-non-java-languages.md @@ -0,0 +1,9 @@ +--- +title: Support for Non-Java Languages +layout: documentation +documentation: true +--- +* [Scala DSL](https://github.com/velvia/ScalaStorm) +* [JRuby DSL](https://github.com/colinsurprenant/storm-jruby) +* [Clojure DSL](Clojure-DSL.html) +* [io-storm](https://github.com/gphat/io-storm): Perl multilang adapter diff --git a/docs/Transactional-topologies.md b/docs/Transactional-topologies.md new file mode 100644 index 00000000000..612ac32f1a0 --- /dev/null +++ b/docs/Transactional-topologies.md @@ -0,0 +1,361 @@ +--- +title: Transactional Topologies +layout: documentation +documentation: true +--- +**NOTE**: Transactional topologies have been deprecated -- use the [Trident](Trident-tutorial.html) framework instead. + +__________________________________________________________________________ + +Storm [guarantees data processing](Guaranteeing-message-processing.html) by providing an at-least-once processing guarantee. The most common question asked about Storm is "Given that tuples can be replayed, how do you do things like counting on top of Storm? Won't you overcount?" + +Storm 0.7.0 introduces transactional topologies, which enable you to get exactly-once messaging semantics for pretty much any computation. So you can do things like counting in a fully-accurate, scalable, and fault-tolerant way. + +Like [Distributed RPC](Distributed-RPC.html), transactional topologies aren't so much a feature of Storm as they are a higher-level abstraction built on top of Storm's primitives of streams, spouts, bolts, and topologies. + +This page explains the transactional topology abstraction, how to use the API, and provides details as to its implementation. + +## Concepts + +Let's build up to Storm's abstraction for transactional topologies one step at a time. Let's start by looking at the simplest possible approach, and then we'll iterate on the design until we reach Storm's design. + +### Design 1 + +The core idea behind transactional topologies is to provide a _strong ordering_ on the processing of data. The simplest manifestation of this, and the first design we'll look at, is processing the tuples one at a time and not moving on to the next tuple until the current tuple has been successfully processed by the topology. + +Each tuple is associated with a transaction id.
If the tuple fails and needs to be replayed, then it is emitted with the exact same transaction id. A transaction id is an integer that increments for every tuple, so the first tuple will have transaction id `1`, the second id `2`, and so on. + +The strong ordering of tuples gives you the capability to achieve exactly-once semantics even in the case of tuple replay. Let's look at an example of how you would do this. + +Suppose you want to do a global count of the tuples in the stream. Instead of storing just the count in the database, you instead store the count and the latest transaction id together as one value in the database. When your code updates the count in the db, it should update the count *only if the transaction id in the database differs from the transaction id for the tuple currently being processed*. Consider the two cases: + +1. *The transaction id in the database is different from the current transaction id:* Because of the strong ordering of transactions, we know for sure that the current tuple isn't represented in that count. So we can safely increment the count and update the transaction id. +2. *The transaction id is the same as the current transaction id:* Then we know that this tuple is already incorporated into the count and can skip the update. The tuple must have failed after updating the database but before reporting success back to Storm. + +This logic and the strong ordering of transactions ensures that the count in the database will be accurate even if tuples are replayed. Credit for this trick of storing a transaction id in the database along with the value goes to the Kafka devs, particularly [this design document](http://incubator.apache.org/kafka/07/design.html). + +Furthermore, notice that the topology can safely update many sources of state in the same transaction and achieve exactly-once semantics. If there's a failure, any updates that already succeeded will be skipped on the retry, and any updates that failed will properly retry. For example, if you were processing a stream of tweeted urls, you could update a database that stores a tweet count for each url as well as a database that stores a tweet count for each domain. + +There is a significant problem, though, with this design of processing one tuple at a time. Having to wait for each tuple to be _completely processed_ before moving on to the next one is horribly inefficient. It entails a huge number of database calls (at least one per tuple), and this design makes very little use of the parallelization capabilities of Storm. So it isn't very scalable. + +### Design 2 + +Instead of processing one tuple at a time, a better approach is to process a batch of tuples for each transaction. So if you're doing a global count, you would increment the count by the number of tuples in the entire batch. If a batch fails, you replay the exact batch that failed. Instead of assigning a transaction id to each tuple, you assign a transaction id to each batch, and the processing of the batches is strongly ordered. Here's a diagram of this design: + +![Storm cluster](images/transactional-batches.png) + +So if you're processing 1000 tuples per batch, your application will do 1000x fewer database operations than design 1. Additionally, it takes advantage of Storm's parallelization capabilities as the computation for each batch can be parallelized.
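+ +To make the update rule above concrete, here is a minimal sketch of the txid-gated update, which applies per batch in this design. It assumes a simple `Value` class with `long count` and `BigInteger txid` fields; `DATABASE`, `GLOBAL_COUNT_KEY`, `batchTxid`, and `batchCount` are illustrative names, not part of any Storm API: + +```java +// Increment the stored count only if this transaction id hasn't +// already been committed; otherwise the batch is already counted. +Value val = DATABASE.get(GLOBAL_COUNT_KEY); +if(val == null || !val.txid.equals(batchTxid)) { + Value newval = new Value(); + newval.txid = batchTxid; + newval.count = (val == null ? 0 : val.count) + batchCount; + DATABASE.put(GLOBAL_COUNT_KEY, newval); +} +// If the txids match, skip the update: the earlier attempt reached the +// database but failed before reporting success back to Storm. +```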
While this design is significantly better than design 1, it's still not as resource-efficient as possible. The workers in the topology spend a lot of time being idle, waiting for the other portions of the computation to finish. For example, in a topology like this: + +![Storm cluster](images/transactional-design-2.png) + +After bolt 1 finishes its portion of the processing, it will be idle until the rest of the bolts finish and the next batch can be emitted from the spout. + +### Design 3 (Storm's design) + +A key realization is that not all the work for processing batches of tuples needs to be strongly ordered. For example, when computing a global count, there are two parts to the computation: + +1. Computing the partial count for the batch +2. Updating the global count in the database with the partial count + +The computation of #2 needs to be strongly ordered across the batches, but there's no reason you shouldn't be able to _pipeline_ the computation of the batches by computing #1 for many batches in parallel. So while batch 1 is working on updating the database, batches 2 through 10 can compute their partial counts. + +Storm accomplishes this distinction by breaking the computation of a batch into two phases: + +1. The processing phase: the phase that can be done in parallel for many batches +2. The commit phase: the commit phases for batches are strongly ordered. So the commit for batch 2 is not done until the commit for batch 1 has been successful. + +The two phases together are called a "transaction". Many batches can be in the processing phase at a given moment, but only one batch can be in the commit phase. If there's any failure in the processing or commit phase for a batch, the entire transaction is replayed (both phases). + +## Design details + +When using transactional topologies, Storm does the following for you: + +1. *Manages state:* Storm stores in Zookeeper all the state necessary to do transactional topologies. This includes the current transaction id as well as the metadata defining the parameters for each batch. +2. *Coordinates the transactions:* Storm will manage everything necessary to determine which transactions should be processing or committing at any point. +3. *Fault detection:* Storm leverages the acking framework to efficiently determine when a batch has been successfully processed, successfully committed, or failed. Storm will then replay batches appropriately. You don't have to do any acking or anchoring -- Storm manages all of this for you. +4. *First-class batch processing API*: Storm layers an API on top of regular bolts to allow for batch processing of tuples. Storm manages all the coordination for determining when a task has received all the tuples for that particular transaction. Storm will also take care of cleaning up any accumulated state for each transaction (like the partial counts). + +Finally, note that transactional topologies require a source queue that can replay an exact batch of messages. Technologies like [Kestrel](https://github.com/robey/kestrel) can't do this. [Apache Kafka](http://incubator.apache.org/kafka/index.html) is a perfect fit for this kind of spout, and [storm-kafka-client](https://github.com/apache/storm/tree/master/external/storm-kafka-client) contains a transactional spout implementation for Kafka. + +## The basics through example + +You build transactional topologies by using [TransactionalTopologyBuilder](javadocs/org/apache/storm/transactional/TransactionalTopologyBuilder.html). Here's the transactional topology definition for a topology that computes the global count of tuples from the input stream.
This code comes from [TransactionalGlobalCount]({{page.git-blob-base}}/examples/storm-starter/src/jvm/org/apache/storm/starter/TransactionalGlobalCount.java) in storm-starter. + +```java +MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH); +TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 3); +builder.setBolt("partial-count", new BatchCount(), 5) + .shuffleGrouping("spout"); +builder.setBolt("sum", new UpdateGlobalCount()) + .globalGrouping("partial-count"); +``` + +`TransactionalTopologyBuilder` takes in its constructor an id for the transactional topology, an id for the spout within the topology, a transactional spout, and optionally the parallelism for the transactional spout. The id for the transactional topology is used to store state about the progress of the topology in Zookeeper, so that if you restart the topology it will continue where it left off. + +A transactional topology has a single `TransactionalSpout` that is defined in the constructor of `TransactionalTopologyBuilder`. In this example, `MemoryTransactionalSpout` is used, which reads in data from an in-memory partitioned source of data (the `DATA` variable). The second argument defines the fields for the data, and the third argument specifies the maximum number of tuples to emit from each partition per batch of tuples. The interface for defining your own transactional spouts is discussed later on in this tutorial. + +Now on to the bolts. This topology parallelizes the computation of the global count. The first bolt, `BatchCount`, randomly partitions the input stream using a shuffle grouping and emits the count for each partition. The second bolt, `UpdateGlobalCount`, does a global grouping and sums together the partial counts to get the count for the batch. It then updates the global count in the database if necessary. + +Here's the definition of `BatchCount`: + +```java +public static class BatchCount extends BaseBatchBolt { + Object _id; + BatchOutputCollector _collector; + + int _count = 0; + + @Override + public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) { + _collector = collector; + _id = id; + } + + @Override + public void execute(Tuple tuple) { + _count++; + } + + @Override + public void finishBatch() { + _collector.emit(new Values(_id, _count)); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("id", "count")); + } +} +``` + +A new instance of this object is created for every batch that's being processed. The actual bolt this runs within is called [BatchBoltExecutor](https://github.com/apache/storm/blob/0.7.0/src/jvm/org/apache/storm/coordination/BatchBoltExecutor.java) and manages the creation and cleanup for these objects. + +The `prepare` method parameterizes this batch bolt with the Storm config, the topology context, an output collector, and the id for this batch of tuples. In the case of transactional topologies, the id will be a [TransactionAttempt](javadocs/org/apache/storm/transactional/TransactionAttempt.html) object. The batch bolt abstraction can be used in Distributed RPC as well, which uses a different type of id for the batches.
`BatchBolt` can actually be parameterized with the type of the id, so if you only intend to use the batch bolt for transactional topologies, you can extend `BaseTransactionalBolt` which has this definition: + +```java +public abstract class BaseTransactionalBolt extends BaseBatchBolt<TransactionAttempt> { +} +``` + +All tuples emitted within a transactional topology must have the `TransactionAttempt` as the first field of the tuple. This lets Storm identify which tuples belong to which batches. So when you emit tuples you need to make sure to meet this requirement. + +The `TransactionAttempt` contains two values: the "transaction id" and the "attempt id". The "transaction id" is the unique id chosen for this batch and is the same no matter how many times the batch is replayed. The "attempt id" is a unique id for this particular batch of tuples and lets Storm distinguish tuples from different emissions of the same batch. Without the attempt id, Storm could confuse a replay of a batch with tuples from a prior time that batch was emitted. This would be disastrous. + +The transaction id increases by 1 for every batch emitted. So the first batch has id "1", the second has id "2", and so on. + +The `execute` method is called for every tuple in the batch. You should accumulate state for the batch in a local instance variable every time this method is called. The `BatchCount` bolt increments a local counter variable for every tuple. + +Finally, `finishBatch` is called when the task has received all tuples intended for it for this particular batch. `BatchCount` emits the partial count to the output stream when this method is called. + +Here's the definition of `UpdateGlobalCount`: + +```java +public static class UpdateGlobalCount extends BaseTransactionalBolt implements ICommitter { + TransactionAttempt _attempt; + BatchOutputCollector _collector; + + int _sum = 0; + + @Override + public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, TransactionAttempt attempt) { + _collector = collector; + _attempt = attempt; + } + + @Override + public void execute(Tuple tuple) { + _sum += tuple.getInteger(1); + } + + @Override + public void finishBatch() { + Value val = DATABASE.get(GLOBAL_COUNT_KEY); + Value newval; + if(val == null || !val.txid.equals(_attempt.getTransactionId())) { + newval = new Value(); + newval.txid = _attempt.getTransactionId(); + if(val == null) { + newval.count = _sum; + } else { + newval.count = _sum + val.count; + } + DATABASE.put(GLOBAL_COUNT_KEY, newval); + } else { + newval = val; + } + _collector.emit(new Values(_attempt, newval.count)); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("id", "sum")); + } +} +``` + +`UpdateGlobalCount` is specific to transactional topologies so it extends `BaseTransactionalBolt`. In the `execute` method, `UpdateGlobalCount` accumulates the count for this batch by summing together the partial batches. The interesting stuff happens in `finishBatch`. + +First, notice that this bolt implements the `ICommitter` interface. This tells Storm that the `finishBatch` method of this bolt should be part of the commit phase of the transaction. So calls to `finishBatch` for this bolt will be strongly ordered by transaction id (calls to `execute` on the other hand can happen during either the processing or commit phases). An alternative way to mark a bolt as a committer is to use the `setCommitterBolt` method in `TransactionalTopologyBuilder` instead of `setBolt`.
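+ +For example, with the builder from the global-count topology above, the bolt could equivalently be declared as a committer when wiring the topology. This is a sketch using `setCommitterBolt`, which takes the same arguments as `setBolt`: + +```java +// UpdateGlobalCount would then not need to implement ICommitter; +// the builder marks it as a committer instead. +builder.setCommitterBolt("sum", new UpdateGlobalCount()) + .globalGrouping("partial-count"); +```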
+ +The code for `finishBatch` in `UpdateGlobalCount` gets the current value from the database and compares its transaction id to the transaction id for this batch. If they are the same, it does nothing. Otherwise, it increments the value in the database by the partial count for this batch. + +A more involved transactional topology example that updates multiple databases idempotently can be found in storm-starter in the [TransactionalWords]({{page.git-blob-base}}/examples/storm-starter/src/jvm/org/apache/storm/starter/TransactionalWords.java) class. + +## Transactional Topology API + +This section outlines the different pieces of the transactional topology API. + +### Bolts + +There are three kinds of bolts possible in a transactional topology: + +1. [BasicBolt](javadocs/org/apache/storm/topology/base/BaseBasicBolt.html): This bolt doesn't deal with batches of tuples and just emits tuples based on a single tuple of input. +2. [BatchBolt](javadocs/org/apache/storm/topology/base/BaseBatchBolt.html): This bolt processes batches of tuples. `execute` is called for each tuple, and `finishBatch` is called when the batch is complete. +3. BatchBolts that are marked as committers: The only difference between this bolt and a regular batch bolt is when `finishBatch` is called. A committer bolt has `finishBatch` called during the commit phase. The commit phase is guaranteed to occur only after all prior batches have successfully committed, and it will be retried until all bolts in the topology succeed the commit for the batch. There are two ways to make a `BatchBolt` a committer: by having the `BatchBolt` implement the [ICommitter](javadocs/org/apache/storm/transactional/ICommitter.html) marker interface, or by using the `setCommitterBolt` method in `TransactionalTopologyBuilder`. + +#### Processing phase vs. commit phase in bolts + +To nail down the difference between the processing phase and commit phase of a transaction, let's look at an example topology: + +![Storm cluster](images/transactional-commit-flow.png) + +In this topology, only the bolts with a red outline are committers. + +During the processing phase, bolt A will process the complete batch from the spout, call `finishBatch` and send its tuples to bolts B and C. Bolt B is a committer, so it will process all the tuples, but `finishBatch` won't be called. Bolt C also will not have `finishBatch` called because it doesn't know if it has received all the tuples from Bolt B yet (because Bolt B is waiting for the transaction to commit). Finally, Bolt D will receive any tuples Bolt C emitted during invocations of its `execute` method. + +When the batch commits, `finishBatch` is called on Bolt B. Once it finishes, Bolt C can now detect that it has received all the tuples and will call `finishBatch`. Finally, Bolt D will receive its complete batch and call `finishBatch`. + +Notice that even though Bolt D is a committer, it doesn't have to wait for a second commit message when it receives the whole batch. Since it receives the whole batch during the commit phase, it goes ahead and completes the transaction. + +Committer bolts act just like batch bolts during the commit phase. The only difference between committer bolts and batch bolts is that committer bolts will not call `finishBatch` during the processing phase of a transaction. + +#### Acking + +Notice that you don't have to do any acking or anchoring when working with transactional topologies. Storm manages all of that under the hood. The acking strategy is heavily optimized.
+ +#### Failing a transaction + +When using regular bolts, you can call the `fail` method on `OutputCollector` to fail the tuple trees of which that tuple is a member. Since transactional topologies hide the acking framework from you, they provide a different mechanism to fail a batch (and cause the batch to be replayed): just throw a [FailedException](javadocs/org/apache/storm/topology/FailedException.html). Unlike regular exceptions, this will only cause that particular batch to be replayed and will not crash the process. + +### Transactional spout + +The `TransactionalSpout` interface is completely different from a regular `Spout` interface. A `TransactionalSpout` implementation emits batches of tuples and must ensure that the same batch of tuples is always emitted for the same transaction id. + +A transactional spout looks like this while a topology is executing: + +![Storm cluster](images/transactional-spout-structure.png) + +The coordinator on the left is a regular Storm spout that emits a tuple whenever a batch should be emitted for a transaction. The emitters execute as a regular Storm bolt and are responsible for emitting the actual tuples for the batch. The emitters subscribe to the "batch emit" stream of the coordinator using an all grouping. + +Because a `TransactionalSpout` needs to be idempotent with respect to the tuples it emits, it is required to store a small amount of state. That state is stored in Zookeeper. + +The details of implementing a `TransactionalSpout` are in [the Javadoc](javadocs/org/apache/storm/transactional/ITransactionalSpout.html). + +#### Partitioned Transactional Spout + +A common kind of transactional spout is one that reads the batches from a set of partitions across many queue brokers. For example, this is how [KafkaTridentSpoutTransactional]({{page.git-tree-base}}/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/trident/KafkaTridentSpoutTransactional.java) works. An `IPartitionedTransactionalSpout` automates the bookkeeping work of managing the state for each partition to ensure idempotent replayability. See [the Javadoc](javadocs/org/apache/storm/transactional/partitioned/IPartitionedTransactionalSpout.html) for more details. + +### Configuration + +There are two important bits of configuration for transactional topologies: + +1. *Zookeeper:* By default, transactional topologies will store state in the same Zookeeper instance used to manage the Storm cluster. You can override this with the "transactional.zookeeper.servers" and "transactional.zookeeper.port" configs. +2. *Number of active batches permissible at once:* You must set a limit on the number of batches that can be processed at once. You configure this using the "topology.max.spout.pending" config. If you don't set this config, it will default to 1. + +## What if you can't emit the same batch of tuples for a given transaction id? + +So far the discussion around transactional topologies has assumed that you can always emit the exact same batch of tuples for the same transaction id. So what do you do if this is not possible? + +Consider an example of when this is not possible. Suppose you are reading tuples from a partitioned message broker (the stream is partitioned across many machines), and a single transaction will include tuples from all the individual machines. Now suppose one of the nodes goes down at the same time that a transaction fails. Without that node, it is impossible to replay the same batch of tuples you just emitted for that transaction id.
The processing in your topology will halt as it's unable to replay the identical batch. The only possible solution is to emit a different batch for that transaction id than you emitted before. Is it possible to still achieve exactly-once messaging semantics even if the batches change? + +It turns out that you can still achieve exactly-once messaging semantics in your processing with a non-idempotent transactional spout, although this requires a bit more work on your part in developing the topology. + +If a batch can change for a given transaction id, then the logic we've been using so far of "skip the update if the transaction id in the database is the same as the id for the current transaction" is no longer valid. This is because the current batch is different from the batch from the last time the transaction was committed, so the result will not necessarily be the same. You can fix this problem by storing a little bit more state in the database. Let's again use the example of storing a global count in the database and suppose the partial count for the batch is stored in the `partialCount` variable. + +Instead of storing a value in the database that looks like this: + +```java +class Value { + Object count; + BigInteger txid; +} +``` + +For non-idempotent transactional spouts you should instead store a value that looks like this: + +```java +class Value { + Object count; + BigInteger txid; + Object prevCount; +} +``` + +The logic for the update is as follows: + +1. If the transaction id for the current batch is the same as the transaction id in the database, set `val.count = val.prevCount + partialCount`. +2. Otherwise, set `val.prevCount = val.count`, `val.count = val.count + partialCount` and `val.txid = batchTxid`. + +This logic works because once you commit a particular transaction id for the first time, all prior transaction ids will never be committed again. A code sketch of this update logic appears at the end of this section. + +There are a few more subtle aspects of transactional topologies that make opaque transactional spouts possible. + +When a transaction fails, all subsequent transactions in the processing phase are considered failed as well. Each of those transactions will be re-emitted and reprocessed. Without this behavior, the following situation could happen: + +1. Transaction A emits tuples 1-50 +2. Transaction B emits tuples 51-100 +3. Transaction A fails +4. Transaction A emits tuples 1-40 +5. Transaction A commits +6. Transaction B commits +7. Transaction C emits tuples 101-150 + +In this scenario, tuples 41-50 are skipped. By failing all subsequent transactions, this would happen instead: + +1. Transaction A emits tuples 1-50 +2. Transaction B emits tuples 51-100 +3. Transaction A fails (and causes Transaction B to fail) +4. Transaction A emits tuples 1-40 +5. Transaction B emits tuples 41-90 +6. Transaction A commits +7. Transaction B commits +8. Transaction C emits tuples 91-140 + +By failing all subsequent transactions on failure, no tuples are skipped. This also shows that a requirement of transactional spouts is that they always emit where the last transaction left off. + +A non-idempotent transactional spout is more concisely referred to as an "OpaqueTransactionalSpout" (opaque is the opposite of idempotent).
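+ +Here is the promised sketch of that opaque update logic, assuming for simplicity that `count` and `prevCount` are `long` fields on `Value`; as before, `DATABASE`, `GLOBAL_COUNT_KEY`, `batchTxid`, and `partialCount` are illustrative names, not Storm API: + +```java +Value val = DATABASE.get(GLOBAL_COUNT_KEY); +if(val != null && val.txid.equals(batchTxid)) { + // Same txid: this batch committed before but may have changed since, + // so recompute from the value that preceded that commit. + val.count = val.prevCount + partialCount; +} else { + // New txid: remember the pre-update count before applying the batch. + long current = (val == null) ? 0 : val.count; + if(val == null) val = new Value(); + val.prevCount = current; + val.count = current + partialCount; + val.txid = batchTxid; +} +DATABASE.put(GLOBAL_COUNT_KEY, val); +```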
[IOpaquePartitionedTransactionalSpout](javadocs/org/apache/storm/transactional/partitioned/IOpaquePartitionedTransactionalSpout.html) is an interface for implementing opaque partitioned transactional spouts, of which [KafkaTridentSpoutOpaque]({{page.git-tree-base}}/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/trident/KafkaTridentSpoutOpaque.java) is an example. `KafkaTridentSpoutOpaque` can withstand losing individual Kafka nodes without sacrificing accuracy as long as you use the update strategy explained in this section. + +## Implementation + +The implementation for transactional topologies is very elegant. Managing the commit protocol, detecting failures, and pipelining batches seem complex, but everything turns out to be a straightforward mapping to Storm's primitives. + +Here's how the data flow works for a transactional spout: + +1. The transactional spout is a subtopology consisting of a coordinator spout and an emitter bolt +2. The coordinator is a regular spout with a parallelism of 1 +3. The emitter is a bolt with a parallelism of P, connected to the coordinator's "batch" stream using an all grouping +4. When the coordinator determines it's time to enter the processing phase for a transaction, it emits a tuple containing the TransactionAttempt and the metadata for that transaction to the "batch" stream +5. Because of the all grouping, every single emitter task receives the notification that it's time to emit its portion of the tuples for that transaction attempt +6. Storm automatically manages the anchoring/acking necessary throughout the whole topology to determine when a transaction has completed the processing phase. The key here is that *the root tuple was created by the coordinator, so the coordinator will receive an "ack" if the processing phase succeeds, and a "fail" if it doesn't succeed for any reason (failure or timeout)*. +7. If the processing phase succeeds, and all prior transactions have successfully committed, the coordinator emits a tuple containing the TransactionAttempt to the "commit" stream. +8. All committing bolts subscribe to the commit stream using an all grouping, so that they will all receive a notification when the commit happens. +9. Like the processing phase, the coordinator uses the acking framework to determine whether the commit phase succeeded or not. If it receives an "ack", it marks that transaction as complete in Zookeeper. + +More notes: + +- Transactional spouts are a sub-topology consisting of a spout and a bolt + - the spout is the coordinator and contains a single task + - the bolt is the emitter + - the bolt subscribes to the coordinator with an all grouping + - serialization of metadata is handled by Kryo. Kryo is initialized ONLY with the registrations defined in the component configuration for the transactional spout +- the coordinator uses the acking framework to determine when a batch has been successfully processed, and then to determine when a batch has been successfully committed. +- state is stored in Zookeeper using RotatingTransactionalState +- committing bolts subscribe to the coordinator's commit stream using an all grouping +- CoordinatedBolt is used to detect when a bolt has received all the tuples for a particular batch.
- this is the same abstraction that is used in DRPC + - for committing bolts, it waits to receive a tuple from the coordinator's commit stream before calling `finishBatch` + - so it can't call `finishBatch` until it has received all tuples from all subscribed components AND it has received the commit stream tuple (for committers). This ensures that it can't prematurely call `finishBatch` diff --git a/docs/Trident-API-Overview.md b/docs/Trident-API-Overview.md new file mode 100644 index 00000000000..b9f074c22ef --- /dev/null +++ b/docs/Trident-API-Overview.md @@ -0,0 +1,642 @@ +--- +title: Trident API Overview +layout: documentation +documentation: true +--- + +The core data model in Trident is the "Stream", processed as a series of batches. A stream is partitioned among the nodes in the cluster, and operations applied to a stream are applied in parallel across each partition. + +There are five kinds of operations in Trident: + +1. Operations that apply locally to each partition and cause no network transfer +2. Repartitioning operations that repartition a stream but otherwise don't change the contents (involves network transfer) +3. Aggregation operations that do network transfer as part of the operation +4. Operations on grouped streams +5. Merges and joins + +## Partition-local operations + +Partition-local operations involve no network transfer and are applied to each batch partition independently. + +### Functions + +A function takes in a set of input fields and emits zero or more tuples as output. The fields of the output tuple are appended to the original input tuple in the stream. If a function emits no tuples, the original input tuple is filtered out. Otherwise, the input tuple is duplicated for each output tuple. Suppose you have this function: + +```java +public class MyFunction extends BaseFunction { + public void execute(TridentTuple tuple, TridentCollector collector) { + for(int i=0; i < tuple.getInteger(0); i++) { + collector.emit(new Values(i)); + } + } +} +``` + +Now suppose you have a stream in the variable "mystream" with the fields ["a", "b", "c"] with the following tuples: + +``` +[1, 2, 3] +[4, 1, 6] +[3, 0, 8] +``` + +If you run this code: + +```java +mystream.each(new Fields("b"), new MyFunction(), new Fields("d")) +``` + +The resulting tuples would have fields ["a", "b", "c", "d"] and look like this: + +``` +[1, 2, 3, 0] +[1, 2, 3, 1] +[4, 1, 6, 0] +``` + +### Filters + +Filters take in a tuple as input and decide whether or not to keep that tuple. Suppose you had this filter: + +```java +public class MyFilter extends BaseFilter { + public boolean isKeep(TridentTuple tuple) { + return tuple.getInteger(0) == 1 && tuple.getInteger(1) == 2; + } +} +``` + +Now suppose you had these tuples with fields ["a", "b", "c"]: + +``` +[1, 2, 3] +[2, 1, 1] +[2, 3, 4] +``` + +If you ran this code: + +```java +mystream.filter(new MyFilter()) +``` + +The resulting tuples would be: + +``` +[1, 2, 3] +``` + +### map and flatMap + +`map` returns a stream consisting of the result of applying the given mapping function to the tuples of the stream. This +can be used to apply a one-to-one transformation to the tuples.
+ +For example, if there is a stream of words and you wanted to convert it to a stream of uppercase words, +you could define a mapping function as follows: + +```java +public class UpperCase implements MapFunction { + @Override + public Values execute(TridentTuple input) { + return new Values(input.getString(0).toUpperCase()); + } +} +``` + +The mapping function can then be applied on the stream to produce a stream of uppercase words: + +```java +mystream.map(new UpperCase()) +``` + +`flatMap` is similar to `map` but has the effect of applying a one-to-many transformation to the values of the stream, +and then flattening the resulting elements into a new stream. + +For example, if there is a stream of sentences and you wanted to convert it to a stream of words, +you could define a flatMap function as follows: + +```java +public class Split implements FlatMapFunction { + @Override + public Iterable<Values> execute(TridentTuple input) { + List<Values> valuesList = new ArrayList<>(); + for (String word : input.getString(0).split(" ")) { + valuesList.add(new Values(word)); + } + return valuesList; + } +} +``` + +The flatMap function can then be applied on the stream of sentences to produce a stream of words: + +```java +mystream.flatMap(new Split()) +``` + +Of course these operations can be chained, so a stream of uppercase words can be obtained from a stream of sentences as follows: + +```java +mystream.flatMap(new Split()).map(new UpperCase()) +``` + +If you don't pass output fields as a parameter, map and flatMap preserve the input fields as the output fields. + +If you want to apply a MapFunction or FlatMapFunction and replace the old fields with new output fields, +you can call map / flatMap with an additional Fields parameter as follows: + +```java +mystream.map(new UpperCase(), new Fields("uppercased")) +``` + +The output stream will have only one output field, "uppercased", regardless of what output fields the previous stream had. +The same applies to flatMap, so the following is valid as well: + +```java +mystream.flatMap(new Split(), new Fields("word")) +``` + +### peek +`peek` can be used to perform an additional action on each trident tuple as it flows through the stream. + This could be useful for debugging to see the tuples as they flow past a certain point in a pipeline. + +For example, the below code would print the result of converting the words to uppercase before they are passed to `groupBy`: + +```java + mystream.flatMap(new Split()).map(new UpperCase()) + .peek(new Consumer() { + @Override + public void accept(TridentTuple input) { + System.out.println(input.getString(0)); + } + }) + .groupBy(new Fields("word")) + .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count")) +``` + +### min and minBy +The `min` and `minBy` operations return the minimum value in each partition of a batch of tuples in a trident stream. + +Suppose a trident stream contains the fields ["device-id", "count"] and the following partitions of tuples: + +``` +Partition 0: +[123, 2] +[113, 54] +[23, 28] +[237, 37] +[12, 23] +[62, 17] +[98, 42] + +Partition 1: +[64, 18] +[72, 54] +[2, 28] +[742, 71] +[98, 45] +[62, 12] +[19, 174] + + +Partition 2: +[27, 94] +[82, 23] +[9, 86] +[53, 71] +[74, 37] +[51, 49] +[37, 98] +``` + +The `minBy` operation can be applied on the above stream of tuples as below, which results in emitting the tuple with the minimum value of the `count` field in each partition.
+ +```java + mystream.minBy(new Fields("count")) +``` + +The result of the above code on the mentioned partitions is: + +``` +Partition 0: +[123, 2] + + +Partition 1: +[62, 12] + + +Partition 2: +[82, 23] +``` + +You can look at the other `min` and `minBy` operations on Stream: +``` java + public <T> Stream minBy(String inputFieldName, Comparator<T> comparator) + public Stream min(Comparator<TridentTuple> comparator) +``` +The example below shows how these APIs can be used to find the minimum using the respective Comparators on a tuple. + +``` java + + FixedBatchSpout spout = new FixedBatchSpout(allFields, 10, Vehicle.generateVehicles(20)); + + TridentTopology topology = new TridentTopology(); + Stream vehiclesStream = topology.newStream("spout1", spout). + each(allFields, new Debug("##### vehicles")); + + Stream slowVehiclesStream = + vehiclesStream + .min(new SpeedComparator()) // Comparator w.r.t speed on received tuple. + .each(vehicleField, new Debug("#### slowest vehicle")); + + vehiclesStream + .minBy(Vehicle.FIELD_NAME, new EfficiencyComparator()) // Comparator w.r.t efficiency on received tuple. + .each(vehicleField, new Debug("#### least efficient vehicle")); + +``` +Example applications of these APIs can be found in [TridentMinMaxOfDevicesTopology](https://github.com/apache/storm/blob/master/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfDevicesTopology.java) and [TridentMinMaxOfVehiclesTopology](https://github.com/apache/storm/blob/master/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfVehiclesTopology.java). + +### max and maxBy +The `max` and `maxBy` operations return the maximum value in each partition of a batch of tuples in a trident stream. + +Suppose a trident stream contains the fields ["device-id", "count"] as mentioned in the above section. + +The `max` and `maxBy` operations can be applied on the above stream of tuples as below, which results in emitting the tuple with the maximum value of the `count` field in each partition. + +``` java + mystream.maxBy(new Fields("count")) +``` +The result of the above code on the mentioned partitions is: + +``` +Partition 0: +[113, 54] + + +Partition 1: +[19, 174] + + +Partition 2: +[37, 98] + +``` + +You can look at the other `max` and `maxBy` functions on Stream: + +``` java + + public <T> Stream maxBy(String inputFieldName, Comparator<T> comparator) + public Stream max(Comparator<TridentTuple> comparator) + +``` + +The example below shows how these APIs can be used to find the maximum using the respective Comparators on a tuple. + +``` java + + FixedBatchSpout spout = new FixedBatchSpout(allFields, 10, Vehicle.generateVehicles(20)); + + TridentTopology topology = new TridentTopology(); + Stream vehiclesStream = topology.newStream("spout1", spout). + each(allFields, new Debug("##### vehicles")); + + vehiclesStream + .max(new SpeedComparator()) // Comparator w.r.t speed on received tuple. + .each(vehicleField, new Debug("#### fastest vehicle")) + .project(driverField) + .each(driverField, new Debug("##### fastest driver")); + + vehiclesStream + .maxBy(Vehicle.FIELD_NAME, new EfficiencyComparator()) // Comparator w.r.t efficiency on received tuple.
.each(vehicleField, new Debug("#### most efficient vehicle")); + +``` + +Example applications of these APIs can be found in [TridentMinMaxOfDevicesTopology](https://github.com/apache/storm/blob/master/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfDevicesTopology.java) and [TridentMinMaxOfVehiclesTopology](https://github.com/apache/storm/blob/master/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfVehiclesTopology.java). + +### Windowing +Trident streams can process tuples in batches that belong to the same window and emit the aggregated result to the next operation. +There are two kinds of windowing supported, based on processing time or tuple count: + 1. Tumbling window + 2. Sliding window + +#### Tumbling window +Tuples are grouped in a single window based on processing time or count. Any tuple belongs to only one of the windows. + +```java + + /** + * Returns a stream of tuples which are aggregated results of a tumbling window with every {@code windowCount} of tuples. + */ + public Stream tumblingWindow(int windowCount, WindowsStoreFactory windowStoreFactory, + Fields inputFields, Aggregator aggregator, Fields functionFields); + + /** + * Returns a stream of tuples which are aggregated results of a window that tumbles at duration of {@code windowDuration} + */ + public Stream tumblingWindow(BaseWindowedBolt.Duration windowDuration, WindowsStoreFactory windowStoreFactory, + Fields inputFields, Aggregator aggregator, Fields functionFields); + +``` + +#### Sliding window +Tuples are grouped in windows, and the window slides for every sliding interval. A tuple can belong to more than one window. + +```java + + /** + * Returns a stream of tuples which are aggregated results of a sliding window with every {@code windowCount} of tuples + * and slides the window after {@code slideCount}. + */ + public Stream slidingWindow(int windowCount, int slideCount, WindowsStoreFactory windowStoreFactory, + Fields inputFields, Aggregator aggregator, Fields functionFields); + + /** + * Returns a stream of tuples which are aggregated results of a window which slides at duration of {@code slidingInterval} + * and completes a window at {@code windowDuration} + */ + public Stream slidingWindow(BaseWindowedBolt.Duration windowDuration, BaseWindowedBolt.Duration slidingInterval, + WindowsStoreFactory windowStoreFactory, Fields inputFields, Aggregator aggregator, Fields functionFields); +``` + +Examples of tumbling and sliding windows can be found [here](Windowing.html). + +#### Common windowing API +Below is the common windowing API, which takes a `WindowConfig` for any of the supported windowing configurations. + +```java + + public Stream window(WindowConfig windowConfig, WindowsStoreFactory windowStoreFactory, Fields inputFields, + Aggregator aggregator, Fields functionFields) + +``` + +`windowConfig` can be any of the below: + - `SlidingCountWindow.of(int windowCount, int slidingCount)` + - `SlidingDurationWindow.of(BaseWindowedBolt.Duration windowDuration, BaseWindowedBolt.Duration slidingDuration)` + - `TumblingCountWindow.of(int windowLength)` + - `TumblingDurationWindow.of(BaseWindowedBolt.Duration windowLength)` + + +Trident windowing APIs need a `WindowsStoreFactory` to store received tuples and aggregated values. Currently, a basic implementation +for HBase is provided with `HBaseWindowsStoreFactory`. It can be extended further to address the respective use cases. +An example of using `HBaseWindowsStoreFactory` for windowing can be seen below.
+ +```java + + // window-state table should already be created with cf:tuples column + HBaseWindowsStoreFactory windowStoreFactory = new HBaseWindowsStoreFactory(new HashMap(), "window-state", "cf".getBytes("UTF-8"), "tuples".getBytes("UTF-8")); + FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3, new Values("the cow jumped over the moon"), + new Values("the man went to the store and bought some candy"), new Values("four score and seven years ago"), + new Values("how many apples can you eat"), new Values("to be or not to be the person")); + spout.setCycle(true); + + TridentTopology topology = new TridentTopology(); + + Stream stream = topology.newStream("spout1", spout).parallelismHint(16).each(new Fields("sentence"), + new Split(), new Fields("word")) + .window(TumblingCountWindow.of(1000), windowStoreFactory, new Fields("word"), new CountAsAggregator(), new Fields("count")) + .peek(new Consumer() { + @Override + public void accept(TridentTuple input) { + LOG.info("Received tuple: [{}]", input); + } + }); + + StormTopology stormTopology = topology.build(); + +``` + +A detailed description of all the above APIs in this section can be found [here](javadocs/org/apache/storm/trident/Stream.html). + +#### Example applications +Example applications of these APIs are located at [TridentHBaseWindowingStoreTopology]({{page.git-blob-base}}/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentHBaseWindowingStoreTopology.java) +and [TridentWindowingInmemoryStoreTopology]({{page.git-blob-base}}/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentWindowingInmemoryStoreTopology.java) + + +### partitionAggregate + +partitionAggregate runs a function on each partition of a batch of tuples. Unlike functions, the tuples emitted by partitionAggregate replace the input tuples given to it. Consider this example: + +```java +mystream.partitionAggregate(new Fields("b"), new Sum(), new Fields("sum")) +``` + +Suppose the input stream contained fields ["a", "b"] and the following partitions of tuples: + +``` +Partition 0: +["a", 1] +["b", 2] + +Partition 1: +["a", 3] +["c", 8] + +Partition 2: +["e", 1] +["d", 9] +["d", 10] +``` + +Then the output stream of that code would contain these tuples with one field called "sum": + +``` +Partition 0: +[3] + +Partition 1: +[11] + +Partition 2: +[20] +``` + +There are three different interfaces for defining aggregators: CombinerAggregator, ReducerAggregator, and Aggregator. + +Here's the interface for CombinerAggregator: + +```java +public interface CombinerAggregator<T> extends Serializable { + T init(TridentTuple tuple); + T combine(T val1, T val2); + T zero(); +} +``` + +A CombinerAggregator returns a single tuple with a single field as output. CombinerAggregators run the init function on each input tuple and use the combine function to combine values until there's only one value left. If there are no tuples in the partition, the CombinerAggregator emits the output of the zero function. For example, here's the implementation of Count: + +```java +public class Count implements CombinerAggregator<Long> { + public Long init(TridentTuple tuple) { + return 1L; + } + + public Long combine(Long val1, Long val2) { + return val1 + val2; + } + + public Long zero() { + return 0L; + } +} +``` + +CombinerAggregators offer high efficiency when used with the aggregate method instead of partitionAggregate ([see below](#aggregation-operations)).
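+ +As a usage sketch, the Count implementation above can be applied per partition with partitionAggregate (using the ["a", "b"] stream from the earlier example): + +```java +// Emits one tuple per partition containing that partition's tuple count. +mystream.partitionAggregate(new Count(), new Fields("count")) +```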
+ +A ReducerAggregator has the following interface: + +```java +public interface ReducerAggregator<T> extends Serializable { + T init(); + T reduce(T curr, TridentTuple tuple); +} +``` + +A ReducerAggregator produces an initial value with init, and then it iterates on that value for each input tuple to produce a single tuple with a single value as output. For example, here's how you would define Count as a ReducerAggregator: + +```java +public class Count implements ReducerAggregator<Long> { + public Long init() { + return 0L; + } + + public Long reduce(Long curr, TridentTuple tuple) { + return curr + 1; + } +} +``` + +ReducerAggregator can also be used with persistentAggregate, as you'll see later. + +The most general interface for performing aggregations is Aggregator, which looks like this: + +```java +public interface Aggregator<T> extends Operation { + T init(Object batchId, TridentCollector collector); + void aggregate(T state, TridentTuple tuple, TridentCollector collector); + void complete(T state, TridentCollector collector); +} +``` + +Aggregators can emit any number of tuples with any number of fields. They can emit tuples at any point during execution. Aggregators execute in the following way: + +1. The init method is called before processing the batch. The return value of init is an Object that will represent the state of the aggregation and will be passed into the aggregate and complete methods. +2. The aggregate method is called for each input tuple in the batch partition. This method can update the state and optionally emit tuples. +3. The complete method is called when all tuples for the batch partition have been processed by aggregate. + +Here's how you would implement Count as an Aggregator: + +```java +public class CountAgg extends BaseAggregator<CountAgg.CountState> { + static class CountState { + long count = 0; + } + + public CountState init(Object batchId, TridentCollector collector) { + return new CountState(); + } + + public void aggregate(CountState state, TridentTuple tuple, TridentCollector collector) { + state.count += 1; + } + + public void complete(CountState state, TridentCollector collector) { + collector.emit(new Values(state.count)); + } +} +``` + +Sometimes you want to execute multiple aggregators at the same time. This is called chaining and can be accomplished like this: + +```java +mystream.chainedAgg() + .partitionAggregate(new Count(), new Fields("count")) + .partitionAggregate(new Fields("b"), new Sum(), new Fields("sum")) + .chainEnd() +``` + +This code will run the Count and Sum aggregators on each partition. The output will contain a single tuple with the fields ["count", "sum"]. + +### stateQuery and partitionPersist + +stateQuery and partitionPersist query and update sources of state, respectively. You can read about how to use them in the [Trident state doc](Trident-state.html). + +### projection + +The projection method on Stream keeps only the fields specified in the operation. If you had a Stream with fields ["a", "b", "c", "d"] and you ran this code: + +```java +mystream.project(new Fields("b", "d")) +``` + +The output stream would contain only the fields ["b", "d"]. + + +## Repartitioning operations + +Repartitioning operations run a function to change how the tuples are partitioned across tasks. The number of partitions can also change as a result of repartitioning (for example, if the parallelism hint is greater after repartitioning). Repartitioning requires network transfer. Here are the repartitioning functions: + +1.
shuffle: Uses a random round-robin algorithm to evenly redistribute tuples across all target partitions +2. broadcast: Every tuple is replicated to all target partitions. This can be useful during DRPC – for example, if you need to do a stateQuery on every partition of data. +3. partitionBy: partitionBy takes in a set of fields and does semantic partitioning based on that set of fields. The fields are hashed and modded by the number of target partitions to select the target partition. partitionBy guarantees that the same set of fields always goes to the same target partition. +4. global: All tuples are sent to the same partition. The same partition is chosen for all batches in the stream. +5. batchGlobal: All tuples in the batch are sent to the same partition. Different batches in the stream may go to different partitions. +6. partition: This method takes in a custom partitioning function that implements org.apache.storm.grouping.CustomStreamGrouping + +## Aggregation operations + +Trident has aggregate and persistentAggregate methods for doing aggregations on Streams. aggregate is run on each batch of the stream in isolation, while persistentAggregate will aggregate over all tuples across all batches in the stream and store the result in a source of state. + +Running aggregate on a Stream does a global aggregation. When you use a ReducerAggregator or an Aggregator, the stream is first repartitioned into a single partition, and then the aggregation function is run on that partition. When you use a CombinerAggregator, on the other hand, first Trident will compute partial aggregations of each partition, then repartition to a single partition, and then finish the aggregation after the network transfer. CombinerAggregators are far more efficient and should be used when possible. + +Here's an example of using aggregate to get a global count for a batch: + +```java +mystream.aggregate(new Count(), new Fields("count")) +``` + +Like partitionAggregate, aggregators for aggregate can be chained. However, if you chain a CombinerAggregator with a non-CombinerAggregator, Trident is unable to do the partial aggregation optimization. + +You can read more about how to use persistentAggregate in the [Trident state doc](Trident-state.html). + +## Operations on grouped streams + +The groupBy operation repartitions the stream by doing a partitionBy on the specified fields, and then within each partition groups together tuples whose group fields are equal. For example, here's an illustration of a groupBy operation: + +![Grouping](images/grouping.png) + +If you run aggregators on a grouped stream, the aggregation will be run within each group instead of against the whole batch. persistentAggregate can also be run on a GroupedStream, in which case the results will be stored in a [MapState]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/trident/state/map/MapState.java) with the key being the grouping fields. You can read more about persistentAggregate in the [Trident state doc](Trident-state.html). + +Like regular streams, aggregators on grouped streams can be chained. + +## Merges and joins + +The last part of the API is combining different streams together. The simplest way to combine streams is to merge them into one stream. You can do that with the TridentTopology#merge method, like so: + +```java +topology.merge(stream1, stream2, stream3); +``` + +Trident will name the output fields of the new, merged stream as the output fields of the first stream. + +Another way to combine streams is with a join.
Now, a standard join, like the kind from SQL, requires finite input, so joins don't make sense for infinite streams. Joins in Trident only apply within each small batch that comes off of the spout. + +Here's an example join between a stream containing fields ["key", "val1", "val2"] and another stream containing ["x", "val1"]: + +```java +topology.join(stream1, new Fields("key"), stream2, new Fields("x"), new Fields("key", "a", "b", "c")); +``` + +This joins stream1 and stream2 together using "key" and "x" as the join fields for each respective stream. Then, Trident requires that all the output fields of the new stream be named, since the input streams could have overlapping field names. The tuples emitted from the join will contain: + +1. First, the list of join fields. In this case, "key" corresponds to "key" from stream1 and "x" from stream2. +2. Next, a list of all non-join fields from all streams, in order of how the streams were passed to the join method. In this case, "a" and "b" correspond to "val1" and "val2" from stream1, and "c" corresponds to "val1" from stream2. + +When a join happens between streams originating from different spouts, those spouts will be synchronized with how they emit batches. That is, a batch of processing will include tuples from each spout. + +You might be wondering – how do you do something like a "windowed join", where tuples from one side of the join are joined against the last hour of tuples from the other side of the join? + +To do this, you would make use of partitionPersist and stateQuery. The last hour of tuples from one side of the join would be stored and rotated in a source of state, keyed by the join field. Then the stateQuery would do lookups by the join field to perform the "join". diff --git a/docs/Trident-RAS-API.md b/docs/Trident-RAS-API.md new file mode 100644 index 00000000000..ce18e02671e --- /dev/null +++ b/docs/Trident-RAS-API.md @@ -0,0 +1,56 @@ +--- +title: Trident RAS API +layout: documentation +documentation: true +--- + +## Trident RAS API + +The Trident RAS (Resource Aware Scheduler) API provides a mechanism to allow users to specify the resource consumption of a Trident topology. The API looks exactly like the base RAS API, only it is called on Trident Streams instead of Bolts and Spouts. + +In order to avoid duplication and inconsistency in documentation, the purpose and effects of resource setting are not described here, but are instead found in the [Resource Aware Scheduler Overview](Resource_Aware_Scheduler_overview.html). + +### Use + +First, an example: + +```java + TridentTopology topo = new TridentTopology(); + topo.setResourceDefaults(new DefaultResourceDeclarer() + .setMemoryLoad(128) + .setCPULoad(20)); + TridentState wordCounts = + topo + .newStream("words", feeder) + .parallelismHint(5) + .setCPULoad(20) + .setMemoryLoad(512,256) + .each( new Fields("sentence"), new Split(), new Fields("word")) + .setCPULoad(10) + .setMemoryLoad(512) + .each(new Fields("word"), new BangAdder(), new Fields("word!")) + .parallelismHint(10) + .setCPULoad(50) + .setMemoryLoad(1024) + .each(new Fields("word!"), new QMarkAdder(), new Fields("word!?")) + .groupBy(new Fields("word!")) + .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count")) + .setCPULoad(100) + .setMemoryLoad(2048); +``` + +Resources can be set for each operation (except for grouping, shuffling, and partitioning). +Operations that are combined by Trident into single Bolts will have their resources summed.
+
+Every Bolt is given **at least** the default resources, regardless of user settings.
+
+In the above case, we end up with
+
+
+- a spout and spout coordinator with a CPU load of 20% each, and a memory load of 512MiB on-heap and 256MiB off-heap.
+- a bolt with 80% CPU load (10% + 50% + 20%) and a memory load of 1664MiB (1024 + 512 + 128) on-heap, from the combined `Split` and `BangAdder` plus the `QMarkAdder`, which used the default resources contained in the DefaultResourceDeclarer.
+- a bolt with 100% CPU load and a memory load of 2048MiB on-heap, with the default value for off-heap.
+
+Resource declarations may be called after any operation. The operations without explicit resources will get the defaults. If you choose to set resources for only some operations, defaults must be declared, or topology submission will fail.
+Resource declarations have the same *boundaries* as parallelism hints. They don't cross any groupings, shufflings, or any other kind of repartitioning.
+Resources are declared per operation, but get combined within boundaries.
diff --git a/docs/Trident-spouts.md b/docs/Trident-spouts.md
new file mode 100644
index 00000000000..978881dd341
--- /dev/null
+++ b/docs/Trident-spouts.md
@@ -0,0 +1,44 @@
+---
+title: Trident Spouts
+layout: documentation
+documentation: true
+---
+# Trident spouts
+
+Like in the vanilla Storm API, spouts are the source of streams in a Trident topology. On top of the vanilla Storm spouts, Trident exposes additional APIs for more sophisticated spouts.
+
+There is an inextricable link between how you source your data streams and how you update state (e.g. databases) based on those data streams. See the [Trident state doc](Trident-state.html) for an explanation of this – understanding this link is imperative for understanding the spout options available.
+
+Regular Storm spouts will be non-transactional spouts in a Trident topology. To use a regular Storm IRichSpout, create the stream like this in a TridentTopology:
+
+```java
+TridentTopology topology = new TridentTopology();
+topology.newStream("myspoutid", new MyRichSpout());
+```
+
+All spouts in a Trident topology are required to be given a unique identifier for the stream – this identifier must be unique across all topologies run on the cluster. Trident will use this identifier to store metadata about what the spout has consumed in Zookeeper, including the txid and any metadata associated with the spout.
+
+You can configure the Zookeeper storage of spout metadata via the following configuration options:
+
+1. `transactional.zookeeper.servers`: A list of Zookeeper hostnames
+2. `transactional.zookeeper.port`: The port of the Zookeeper cluster
+3. `transactional.zookeeper.root`: The root dir in Zookeeper where metadata is stored. Metadata will be stored at the path `<root path>/<spout id>`
+
+## Pipelining
+
+By default, Trident processes a single batch at a time, waiting for the batch to succeed or fail before trying another batch. You can get significantly higher throughput – and lower latency for each batch – by pipelining the batches. You configure the maximum number of batches to be processed simultaneously with the "topology.max.spout.pending" property.
+
+Even while processing multiple batches simultaneously, Trident will order any state updates taking place in the topology among batches. For example, suppose you're doing a global count aggregation into a database.
+The idea is that while you're updating the count in the database for batch 1, you can still be computing the partial counts for batches 2 through 10. Trident won't move on to the state updates for batch 2 until the state updates for batch 1 have succeeded. This is essential for achieving exactly-once processing semantics, as outlined in the [Trident state doc](Trident-state.html).
+
+## Trident spout types
+
+The following spout APIs are available:
+
+1. [ITridentSpout]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/trident/spout/ITridentSpout.java): The most general API that can support transactional or opaque transactional semantics. Generally you'll use one of the partitioned flavors of this API rather than this one directly.
+2. [IBatchSpout]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/trident/spout/IBatchSpout.java): A non-transactional spout that emits batches of tuples at a time
+3. [IPartitionedTridentSpout]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/trident/spout/IPartitionedTridentSpout.java): A transactional spout that reads from a partitioned data source (like a cluster of Kafka servers)
+4. [IOpaquePartitionedTridentSpout]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/trident/spout/IOpaquePartitionedTridentSpout.java): An opaque transactional spout that reads from a partitioned data source
+
+And, as mentioned at the beginning of this tutorial, you can use regular IRichSpout's as well.
+
+
diff --git a/docs/Trident-state.md b/docs/Trident-state.md
new file mode 100644
index 00000000000..030dd8c9016
--- /dev/null
+++ b/docs/Trident-state.md
@@ -0,0 +1,331 @@
+---
+title: Trident State
+layout: documentation
+---
+
+
+Trident has first-class abstractions for reading from and writing to stateful sources. The state can either be internal to the topology – e.g., kept in-memory and backed by HDFS – or externally stored in a database like Memcached or Cassandra. There's no difference in the Trident API for either case.
+
+Trident manages state in a fault-tolerant way so that state updates are idempotent in the face of retries and failures. This lets you reason about Trident topologies as if each message were processed exactly-once.
+
+There are various levels of fault-tolerance possible when doing state updates. Before getting to those, let's look at an example that illustrates the tricks necessary to achieve exactly-once semantics. Suppose that you're doing a count aggregation of your stream and want to store the running count in a database. Now suppose you store in the database a single value representing the count, and every time you process a new tuple you increment the count.
+
+When failures occur, tuples will be replayed. This brings up a problem when doing state updates (or anything with side effects) – you have no idea if you've ever successfully updated the state based on this tuple before. Perhaps you never processed the tuple before, in which case you should increment the count. Perhaps you've processed the tuple and successfully incremented the count, but the tuple failed processing in another step. In this case, you should not increment the count. Or perhaps you saw the tuple before but got an error when updating the database. In this case, you *should* update the database.
+
+By just storing the count in the database, you have no idea whether or not this tuple has been processed before. So you need more information in order to make the right decision.
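+
+To make the failure mode concrete, here is a minimal sketch of the naive update just described (the `db` client is hypothetical, purely for illustration; it is not part of the Trident API):
+
+```java
+// Naive counter update: NOT idempotent. If a tuple is replayed after a
+// failure it can be counted twice; if the earlier update never reached
+// the database, skipping the replay would undercount instead.
+public void processTuple(TridentTuple tuple) {
+    long count = db.get("count");  // db: hypothetical key/value client
+    db.put("count", count + 1);    // no record of whether this tuple was counted
+}
+```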
+
+Trident provides the following semantics which are sufficient for achieving exactly-once processing semantics:
+
+1. Tuples are processed as small batches (see [the tutorial](Trident-tutorial.html))
+2. Each batch of tuples is given a unique id called the "transaction id" (txid). If the batch is replayed, it is given the exact same txid.
+3. State updates are ordered among batches. That is, the state updates for batch 3 won't be applied until the state updates for batch 2 have succeeded.
+
+With these primitives, your State implementation can detect whether or not the batch of tuples has been processed before and take the appropriate action to update the state in a consistent way. The action you take depends on the exact semantics provided by your input spouts as to what's in each batch. There are three kinds of spouts possible with respect to fault-tolerance: "non-transactional", "transactional", and "opaque transactional". Likewise, there are three kinds of state possible with respect to fault-tolerance: "non-transactional", "transactional", and "opaque transactional". Let's take a look at each spout type and see what kind of fault-tolerance you can achieve with each.
+
+## Transactional spouts
+
+Remember, Trident processes tuples as small batches with each batch being given a unique transaction id. The properties of spouts vary according to the guarantees they can provide as to what's in each batch. A transactional spout has the following properties:
+
+1. Batches for a given txid are always the same. Replays of batches for a txid will contain the exact same set of tuples as the first time that batch was emitted for that txid.
+2. There's no overlap between batches of tuples (tuples are in one batch or another, never multiple).
+3. Every tuple is in a batch (no tuples are skipped)
+
+This is a pretty easy type of spout to understand: the stream is divided into fixed batches that never change. Storm has [an implementation of a transactional spout]({{page.git-tree-base}}/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/trident/KafkaTridentSpoutTransactional.java) for Kafka.
+
+You might be wondering – why wouldn't you just always use a transactional spout? They're simple and easy to understand. One reason you might not use one is that they're not necessarily very fault-tolerant. For example, the way TransactionalTridentKafkaSpout works is the batch for a txid will contain tuples from all the Kafka partitions for a topic. Once a batch has been emitted, any time that batch is re-emitted in the future the exact same set of tuples must be emitted to meet the semantics of transactional spouts. Now suppose a batch is emitted from TransactionalTridentKafkaSpout, the batch fails to process, and at the same time one of the Kafka nodes goes down. You're now incapable of replaying the same batch as you did before (since the node is down and some partitions for the topic are unavailable), and processing will halt.
+
+This is why "opaque transactional" spouts exist – they are fault-tolerant to losing source nodes while still allowing you to achieve exactly-once processing semantics. We'll cover those spouts in the next section though.
+
+(One side note – once Kafka supports replication, it will be possible to have transactional spouts that are fault-tolerant to node failure, but that feature does not exist yet.)
+
+Before we get to "opaque transactional" spouts, let's look at how you would design a State implementation that has exactly-once semantics for transactional spouts.
This State type is called a "transactional state" and takes advantage of the fact that any given txid is always associated with the exact same set of tuples. + +Suppose your topology computes word count and you want to store the word counts in a key/value database. The key will be the word, and the value will contain the count. You've already seen that storing just the count as the value isn't sufficient to know whether you've processed a batch of tuples before. Instead, what you can do is store the transaction id with the count in the database as an atomic value. Then, when updating the count, you can just compare the transaction id in the database with the transaction id for the current batch. If they're the same, you skip the update – because of the strong ordering, you know for sure that the value in the database incorporates the current batch. If they're different, you increment the count. This logic works because the batch for a txid never changes, and Trident ensures that state updates are ordered among batches. + +Consider this example of why it works. Suppose you are processing txid 3 which consists of the following batch of tuples: + +``` +["man"] +["man"] +["dog"] +``` + +Suppose the database currently holds the following key/value pairs: + +``` +man => [count=3, txid=1] +dog => [count=4, txid=3] +apple => [count=10, txid=2] +``` + +The txid associated with "man" is txid 1. Since the current txid is 3, you know for sure that this batch of tuples is not represented in that count. So you can go ahead and increment the count by 2 and update the txid. On the other hand, the txid for "dog" is the same as the current txid. So you know for sure that the increment from the current batch is already represented in the database for the "dog" key. So you can skip the update. After completing updates, the database looks like this: + +``` +man => [count=5, txid=3] +dog => [count=4, txid=3] +apple => [count=10, txid=2] +``` + +Let's now look at opaque transactional spouts and how to design states for that type of spout. + +## Opaque transactional spouts + +As described before, an opaque transactional spout cannot guarantee that the batch of tuples for a txid remains constant. An opaque transactional spout has the following property: + +1. Every tuple is *successfully* processed in exactly one batch. However, it's possible for a tuple to fail to process in one batch and then succeed to process in a later batch. + +[KafkaTridentSpoutOpaque]({{page.git-tree-base}}/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/trident/KafkaTridentSpoutOpaque.java) is a spout that has this property and is fault-tolerant to losing Kafka nodes. Whenever it's time for KafkaTridentSpoutOpaque to emit a batch, it emits tuples starting from where the last batch finished emitting. This ensures that no tuple is ever skipped or successfully processed by multiple batches. + +With opaque transactional spouts, it's no longer possible to use the trick of skipping state updates if the transaction id in the database is the same as the transaction id for the current batch. This is because the batch may have changed between state updates. + +What you can do is store more state in the database. Rather than store a value and transaction id in the database, you instead store a value, transaction id, and the previous value in the database. Let's again use the example of storing a count in the database. Suppose the partial count for your batch is "2" and it's time to apply a state update. 
Suppose the value in the database looks like this:
+
+```
+{ value = 4,
+  prevValue = 1,
+  txid = 2
+}
+```
+
+Suppose your current txid is 3, different from what's in the database. In this case, you set "prevValue" equal to "value", increment "value" by your partial count, and update the txid. The new database value will look like this:
+
+```
+{ value = 6,
+  prevValue = 4,
+  txid = 3
+}
+```
+
+Now suppose your current txid is 2, equal to what's in the database. Now you know that the "value" in the database contains an update from a previous batch for your current txid, but that batch may have been different so you have to ignore it. What you do in this case is increment "prevValue" by your partial count to compute the new "value". You then set the value in the database to this:
+
+```
+{ value = 3,
+  prevValue = 1,
+  txid = 2
+}
+```
+
+This works because of the strong ordering of batches provided by Trident. Once Trident moves onto a new batch for state updates, it will never go back to a previous batch. And since opaque transactional spouts guarantee no overlap between batches – that each tuple is successfully processed by one batch – you can safely update based on the previous value.
+
+## Non-transactional spouts
+
+Non-transactional spouts don't provide any guarantees about what's in each batch. So processing might be at-most-once, in which case tuples are not retried after failed batches. Or it might be at-least-once, where tuples can be processed successfully by multiple batches. There's no way to achieve exactly-once semantics for this kind of spout.
+
+## Summary of spout and state types
+
+This diagram shows which combinations of spouts / states enable exactly-once messaging semantics:
+
+![Spouts vs States](images/spout-vs-state.png)
+
+Opaque transactional states have the strongest fault-tolerance, but this comes at the cost of needing to store the txid and two values in the database. Transactional states require less state in the database, but only work with transactional spouts. Finally, non-transactional states require the least state in the database but cannot achieve exactly-once semantics.
+
+The state and spout types you choose are a tradeoff between fault-tolerance and storage costs, and ultimately your application requirements will determine which combination is right for you.
+
+## State APIs
+
+You've seen the intricacies of what it takes to achieve exactly-once semantics. The nice thing about Trident is that it internalizes all the fault-tolerance logic within the State – as a user you don't have to deal with comparing txids, storing multiple values in the database, or anything like that. You can write code like this:
+
+```java
+TridentTopology topology = new TridentTopology();
+TridentState wordCounts =
+      topology.newStream("spout1", spout)
+        .each(new Fields("sentence"), new Split(), new Fields("word"))
+        .groupBy(new Fields("word"))
+        .persistentAggregate(MemcachedState.opaque(serverLocations), new Count(), new Fields("count"))
+        .parallelismHint(6);
+```
+
+All the logic necessary to manage opaque transactional state is internalized in the MemcachedState.opaque call. Additionally, updates are automatically batched to minimize roundtrips to the database.
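+
+For intuition, here is a minimal sketch of the decision rules that such a State hides from you: the transactional rule (skip a batch whose txid is already stored) and the opaque rule (recompute this batch from the previous value when the txid matches). The record types here are hypothetical, purely for illustration, not part of the Storm API:
+
+```java
+// Illustrative sketch only: these record types are hypothetical, not Storm API.
+class TxValue { long count; long txid; }
+class OpaqueTxValue { long count; long prevCount; long txid; }
+
+// Transactional rule: a txid always maps to the same batch, so if the stored
+// txid equals the current one the batch is already reflected and can be skipped.
+TxValue applyTransactional(TxValue stored, long txid, long partialCount) {
+    if (stored.txid == txid) {
+        return stored; // already applied; strong ordering makes this safe
+    }
+    TxValue updated = new TxValue();
+    updated.count = stored.count + partialCount;
+    updated.txid = txid;
+    return updated;
+}
+
+// Opaque rule: a replayed txid may carry a different batch, so instead of
+// skipping, recompute this batch's contribution on top of the previous value.
+OpaqueTxValue applyOpaque(OpaqueTxValue stored, long txid, long partialCount) {
+    OpaqueTxValue updated = new OpaqueTxValue();
+    if (stored.txid == txid) {
+        updated.count = stored.prevCount + partialCount; // redo this batch
+        updated.prevCount = stored.prevCount;
+    } else {
+        updated.count = stored.count + partialCount;     // new batch on top
+        updated.prevCount = stored.count;
+    }
+    updated.txid = txid;
+    return updated;
+}
+```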
+
+The base State interface just has two methods:
+
+```java
+public interface State {
+    void beginCommit(Long txid); // can be null for things like partitionPersist occurring off a DRPC stream
+    void commit(Long txid);
+}
+```
+
+You're told when a state update is beginning and when a state update is ending, and you're given the txid in each case. Trident assumes nothing about how your state works, what kind of methods there are to update it, and what kind of methods there are to read from it.
+
+Suppose you have a home-grown database that contains user location information and you want to be able to access it from Trident. Your State implementation would have methods for getting and setting user information:
+
+```java
+public class LocationDB implements State {
+    public void beginCommit(Long txid) {
+    }
+
+    public void commit(Long txid) {
+    }
+
+    public void setLocation(long userId, String location) {
+        // code to access database and set location
+    }
+
+    public String getLocation(long userId) {
+        // code to get location from database
+        return null; // placeholder
+    }
+}
+```
+
+You then provide Trident a StateFactory that can create instances of your State object within Trident tasks. The StateFactory for your LocationDB might look something like this:
+
+```java
+public class LocationDBFactory implements StateFactory {
+    public State makeState(Map conf, int partitionIndex, int numPartitions) {
+        return new LocationDB();
+    }
+}
+```
+
+Trident provides the QueryFunction interface for writing Trident operations that query a source of state, and the StateUpdater interface for writing Trident operations that update a source of state. For example, let's write an operation "QueryLocation" that queries the LocationDB for the locations of users. Let's start off with how you would use it in a topology. Let's say this topology consumes an input stream of userids:
+
+```java
+TridentTopology topology = new TridentTopology();
+TridentState locations = topology.newStaticState(new LocationDBFactory());
+topology.newStream("myspout", spout)
+        .stateQuery(locations, new Fields("userid"), new QueryLocation(), new Fields("location"))
+```
+
+Now let's take a look at what the implementation of QueryLocation would look like:
+
+```java
+public class QueryLocation extends BaseQueryFunction<LocationDB, String> {
+    public List<String> batchRetrieve(LocationDB state, List<TridentTuple> inputs) {
+        List<String> ret = new ArrayList<>();
+        for(TridentTuple input: inputs) {
+            ret.add(state.getLocation(input.getLong(0)));
+        }
+        return ret;
+    }
+
+    public void execute(TridentTuple tuple, String location, TridentCollector collector) {
+        collector.emit(new Values(location));
+    }
+}
+```
+
+QueryFunction's execute in two steps. First, Trident collects a batch of reads together and passes them to batchRetrieve. In this case, batchRetrieve will receive multiple user ids. batchRetrieve is expected to return a list of results that's the same size as the list of input tuples. The first element of the result list corresponds to the result for the first input tuple, the second is the result for the second input tuple, and so on.
+
+You can see that this code doesn't take advantage of the batching that Trident does, since it just queries the LocationDB one at a time.
+So a better way to write the LocationDB would be like this:
+
+```java
+public class LocationDB implements State {
+    public void beginCommit(Long txid) {
+    }
+
+    public void commit(Long txid) {
+    }
+
+    public void setLocationsBulk(List<Long> userIds, List<String> locations) {
+        // set locations in bulk
+    }
+
+    public List<String> bulkGetLocations(List<Long> userIds) {
+        // get locations in bulk
+        return null; // placeholder
+    }
+}
+```
+
+Then, you can write the QueryLocation function like this:
+
+```java
+public class QueryLocation extends BaseQueryFunction<LocationDB, String> {
+    public List<String> batchRetrieve(LocationDB state, List<TridentTuple> inputs) {
+        List<Long> userIds = new ArrayList<>();
+        for(TridentTuple input: inputs) {
+            userIds.add(input.getLong(0));
+        }
+        return state.bulkGetLocations(userIds);
+    }
+
+    public void execute(TridentTuple tuple, String location, TridentCollector collector) {
+        collector.emit(new Values(location));
+    }
+}
+```
+
+This code will be much more efficient because it reduces roundtrips to the database.
+
+To update state, you make use of the StateUpdater interface. Here's a StateUpdater that updates a LocationDB with new location information:
+
+```java
+public class LocationUpdater extends BaseStateUpdater<LocationDB> {
+    public void updateState(LocationDB state, List<TridentTuple> tuples, TridentCollector collector) {
+        List<Long> ids = new ArrayList<>();
+        List<String> locations = new ArrayList<>();
+        for(TridentTuple t: tuples) {
+            ids.add(t.getLong(0));
+            locations.add(t.getString(1));
+        }
+        state.setLocationsBulk(ids, locations);
+    }
+}
+```
+
+Here's how you would use this operation in a Trident topology:
+
+```java
+TridentTopology topology = new TridentTopology();
+TridentState locations =
+    topology.newStream("locations", locationsSpout)
+        .partitionPersist(new LocationDBFactory(), new Fields("userid", "location"), new LocationUpdater())
+```
+
+The partitionPersist operation updates a source of state. The StateUpdater receives the State and a batch of tuples with updates to that State. This code just grabs the userids and locations from the input tuples and does a bulk set into the State.
+
+partitionPersist returns a TridentState object representing the location db being updated by the Trident topology. You could then use this state in stateQuery operations elsewhere in the topology.
+
+You can also see that StateUpdaters are given a TridentCollector. Tuples emitted to this collector go to the "new values stream". In this case, there's nothing interesting to emit to that stream, but if you were doing something like updating counts in a database, you could emit the updated counts to that stream. You can then get access to the new values stream for further processing via the TridentState#newValuesStream method.
+
+## persistentAggregate
+
+Trident has another method for updating States called persistentAggregate. You've seen this used in the streaming word count example, shown again below:
+
+```java
+TridentTopology topology = new TridentTopology();
+TridentState wordCounts =
+      topology.newStream("spout1", spout)
+        .each(new Fields("sentence"), new Split(), new Fields("word"))
+        .groupBy(new Fields("word"))
+        .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
+```
+
+persistentAggregate is an additional abstraction built on top of partitionPersist that knows how to take a Trident aggregator and use it to apply updates to the source of state. In this case, since this is a grouped stream, Trident expects the state you provide to implement the "MapState" interface.
+The grouping fields will be the keys in the state, and the aggregation result will be the values in the state. The "MapState" interface looks like this:
+
+```java
+public interface MapState<T> extends State {
+    List<T> multiGet(List<List<Object>> keys);
+    List<T> multiUpdate(List<List<Object>> keys, List<ValueUpdater> updaters);
+    void multiPut(List<List<Object>> keys, List<T> vals);
+}
+```
+
+When you do aggregations on non-grouped streams (a global aggregation), Trident expects your State object to implement the "Snapshottable" interface:
+
+```java
+public interface Snapshottable<T> extends State {
+    T get();
+    T update(ValueUpdater updater);
+    void set(T o);
+}
+```
+
+[MemoryMapState]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/trident/testing/MemoryMapState.java) and [MemcachedState](https://github.com/nathanmarz/trident-memcached/blob/{{page.version}}/src/jvm/trident/memcached/MemcachedState.java) each implement both of these interfaces.
+
+## Implementing Map States
+
+Trident makes it easy to implement MapState's, doing almost all the work for you. The OpaqueMap, TransactionalMap, and NonTransactionalMap classes each implement the respective fault-tolerance logic. You simply provide these classes with an IBackingMap implementation that knows how to do multiGets and multiPuts of the respective key/values. IBackingMap looks like this:
+
+```java
+public interface IBackingMap<T> {
+    List<T> multiGet(List<List<Object>> keys);
+    void multiPut(List<List<Object>> keys, List<T> vals);
+}
+```
+
+OpaqueMap's will call multiPut with [OpaqueValue]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/trident/state/OpaqueValue.java)'s for the vals, TransactionalMap's will give [TransactionalValue]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/trident/state/TransactionalValue.java)'s for the vals, and NonTransactionalMaps will just pass the objects from the topology through.
+
+Trident also provides the [CachedMap]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/trident/state/map/CachedMap.java) class to do automatic LRU caching of map key/vals.
+
+Finally, Trident provides the [SnapshottableMap]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/trident/state/map/SnapshottableMap.java) class that turns a MapState into a Snapshottable object by storing global aggregations into a fixed key.
+
+Take a look at the implementation of [MemcachedState](https://github.com/nathanmarz/trident-memcached/blob/master/src/jvm/trident/memcached/MemcachedState.java) to see how all these utilities can be put together to make a high-performance MapState implementation. MemcachedState allows you to choose between opaque transactional, transactional, and non-transactional semantics.
diff --git a/docs/Trident-tutorial.md b/docs/Trident-tutorial.md
new file mode 100644
index 00000000000..ce26bbedcdc
--- /dev/null
+++ b/docs/Trident-tutorial.md
@@ -0,0 +1,256 @@
+---
+title: Trident Tutorial
+layout: documentation
+documentation: true
+---
+
+Trident is a high-level abstraction for doing realtime computing on top of Storm. It allows you to seamlessly intermix high throughput (millions of messages per second), stateful stream processing with low latency distributed querying. If you're familiar with high level batch processing tools like Pig or Cascading, the concepts of Trident will be very familiar – Trident has joins, aggregations, grouping, functions, and filters. In addition to these, Trident adds primitives for doing stateful, incremental processing on top of any database or persistence store.
Trident has consistent, exactly-once semantics, so it is easy to reason about Trident topologies. + +Trident developed from an earlier effort to provide exactly-once guarantees for Storm. While this earlier API is no longer present in Storm, the [documentation](Transactional-topologies.html) provides a gentle introduction to some of the concepts used by Trident, and may be worth reading as an addendum to the Trident documentation. + +## Illustrative example + +Let's look at an illustrative example of Trident. This example will do two things: + +1. Compute streaming word count from an input stream of sentences +2. Implement queries to get the sum of the counts for a list of words + +For the purposes of illustration, this example will read an infinite stream of sentences from the following source: + +```java +FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3, + new Values("the cow jumped over the moon"), + new Values("the man went to the store and bought some candy"), + new Values("four score and seven years ago"), + new Values("how many apples can you eat")); +spout.setCycle(true); +``` + +This spout cycles through that set of sentences over and over to produce the sentence stream. Here's the code to do the streaming word count part of the computation: + +```java +TridentTopology topology = new TridentTopology(); +TridentState wordCounts = + topology.newStream("spout1", spout) + .each(new Fields("sentence"), new Split(), new Fields("word")) + .groupBy(new Fields("word")) + .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count")) + .parallelismHint(6); +``` + +Let's go through the code line by line. First a TridentTopology object is created, which exposes the interface for constructing Trident computations. TridentTopology has a method called newStream that creates a new stream of data in the topology reading from an input source. In this case, the input source is just the FixedBatchSpout defined from before. Input sources can also be queue brokers like Kestrel or Kafka. Trident keeps track of a small amount of state for each input source (metadata about what it has consumed) in Zookeeper, and the "spout1" string here specifies the node in Zookeeper where Trident should keep that metadata. + +Trident processes the stream as small batches of tuples. For example, the incoming stream of sentences might be divided into batches like so: + +![Batched stream](images/batched-stream.png) + +Generally the size of those small batches will be on the order of thousands or millions of tuples, depending on your incoming throughput. + +Trident provides a fully fledged batch processing API to process those small batches. The API is very similar to what you see in high level abstractions for Hadoop like Pig or Cascading: you can do group by's, joins, aggregations, run functions, run filters, and so on. Of course, processing each small batch in isolation isn't that interesting, so Trident provides functions for doing aggregations across batches and persistently storing those aggregations – whether in memory, in Memcached, in Cassandra, or some other store. Finally, Trident has first-class functions for querying sources of realtime state. That state could be updated by Trident (like in this example), or it could be an independent source of state. + +Back to the example, the spout emits a stream containing one field called "sentence". 
The next line of the topology definition applies the Split function to each tuple in the stream, taking the "sentence" field and splitting it into words. Each sentence tuple creates potentially many word tuples – for instance, the sentence "the cow jumped over the moon" creates six "word" tuples. Here's the definition of Split:
+
+```java
+public class Split extends BaseFunction {
+    public void execute(TridentTuple tuple, TridentCollector collector) {
+        String sentence = tuple.getString(0);
+        for(String word: sentence.split(" ")) {
+            collector.emit(new Values(word));
+        }
+    }
+}
+```
+
+As you can see, it's really simple: it grabs the sentence, splits it on whitespace, and emits a tuple for each word.
+
+The rest of the topology computes word count and keeps the results persistently stored. First the stream is grouped by the "word" field. Then, each group is persistently aggregated using the Count aggregator. The persistentAggregate function knows how to store and update the results of the aggregation in a source of state. In this example, the word counts are kept in memory, but this can be trivially swapped to use Memcached, Cassandra, or any other persistent store. Swapping this topology to store counts in Memcached is as simple as replacing the persistentAggregate line with this (using [trident-memcached](https://github.com/nathanmarz/trident-memcached)), where "serverLocations" is a list of host/ports for the Memcached cluster:
+
+```java
+.persistentAggregate(MemcachedState.transactional(serverLocations), new Count(), new Fields("count"))
+```
+
+The values stored by persistentAggregate represent the aggregation of all batches ever emitted by the stream.
+
+One of the cool things about Trident is that it has fully fault-tolerant, exactly-once processing semantics. This makes it easy to reason about your realtime processing. Trident persists state in a way so that if failures occur and retries are necessary, it won't perform multiple updates to the database for the same source data.
+
+The persistentAggregate method transforms a Stream into a TridentState object. In this case the TridentState object represents all the word counts. We will use this TridentState object to implement the distributed query portion of the computation.
+
+The next part of the topology implements a low latency distributed query on the word counts. The query takes as input a whitespace-separated list of words and returns the sum of the counts for those words. These queries are executed just like normal RPC calls, except they are parallelized in the background. Here's an example of how you might invoke one of these queries:
+
+```java
+DRPCClient client = new DRPCClient("drpc.server.location", 3772);
+System.out.println(client.execute("words", "cat dog the man"));
+// prints the JSON-encoded result, e.g.: "[[5078]]"
+```
+
+As you can see, it looks just like a regular remote procedure call (RPC), except it's executing in parallel across a Storm cluster. The latency for small queries like this is typically around 10ms. More intense DRPC queries can take longer of course, although the latency largely depends on how many resources you have allocated for the computation.
+
+The implementation of the distributed query portion of the topology looks like this:
+
+```java
+topology.newDRPCStream("words")
+        .each(new Fields("args"), new Split(), new Fields("word"))
+        .groupBy(new Fields("word"))
+        .stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count"))
+        .each(new Fields("count"), new FilterNull())
+        .aggregate(new Fields("count"), new Sum(), new Fields("sum"));
+```
+
+The same TridentTopology object is used to create the DRPC stream, and the function is named "words". The function name corresponds to the function name given in the first argument of execute when using a DRPCClient.
+
+Each DRPC request is treated as its own little batch processing job that takes as input a single tuple representing the request. The tuple contains one field called "args" that contains the argument provided by the client. In this case, the argument is a whitespace-separated list of words.
+
+First, the Split function is used to split the arguments for the request into its constituent words. The stream is grouped by "word", and the stateQuery operator is used to query the TridentState object that the first part of the topology generated. stateQuery takes in a source of state – in this case, the word counts computed by the other portion of the topology – and a function for querying that state. In this case, the MapGet function is invoked, which gets the count for each word. Since the DRPC stream is grouped the exact same way as the TridentState was (by the "word" field), each word query is routed to the exact partition of the TridentState object that manages updates for that word.
+
+Next, words that didn't have a count are filtered out via the FilterNull filter and the counts are summed using the Sum aggregator to get the result. Then, Trident automatically sends the result back to the waiting client.
+
+Trident is intelligent about how it executes a topology to maximize performance. There are two interesting things happening automatically in this topology:
+
+1. Operations that read from or write to state (like persistentAggregate and stateQuery) automatically batch operations to that state. So if there are 20 updates that need to be made to the database for the current batch of processing, rather than doing 20 read requests and 20 write requests to the database, Trident will automatically batch up the reads and writes, doing only 1 read request and 1 write request (and in many cases, you can use caching in your State implementation to eliminate the read request). So you get the best of both worlds: convenience – being able to express your computation in terms of what should be done with each tuple – and performance.
+2. Trident aggregators are heavily optimized. Rather than transfer all tuples for a group to the same machine and then run the aggregator, Trident will do partial aggregations when possible before sending tuples over the network. For example, the Count aggregator computes the count on each partition, sends the partial count over the network, and then sums together all the partial counts to get the total count. This technique is similar to the use of combiners in MapReduce.
+
+Let's look at another example of Trident.
+
+## Reach
+
+The next example is a pure DRPC topology that computes the reach of a URL on demand. Reach is the number of unique people exposed to a URL on Twitter.
+To compute reach, you need to fetch all the people who ever tweeted a URL, fetch all the followers of all those people, unique that set of followers, and then count the uniqued set. Computing reach is too intense for a single machine – it can require thousands of database calls and tens of millions of tuples. With Storm and Trident, you can parallelize the computation of each step across a cluster.
+
+This topology will read from two sources of state. One database maps URLs to a list of people who tweeted that URL. The other database maps a person to a list of followers for that person. The topology definition looks like this:
+
+```java
+TridentState urlToTweeters =
+    topology.newStaticState(getUrlToTweetersState());
+TridentState tweetersToFollowers =
+    topology.newStaticState(getTweeterToFollowersState());
+
+topology.newDRPCStream("reach")
+        .stateQuery(urlToTweeters, new Fields("args"), new MapGet(), new Fields("tweeters"))
+        .each(new Fields("tweeters"), new ExpandList(), new Fields("tweeter"))
+        .shuffle()
+        .stateQuery(tweetersToFollowers, new Fields("tweeter"), new MapGet(), new Fields("followers"))
+        .parallelismHint(200)
+        .each(new Fields("followers"), new ExpandList(), new Fields("follower"))
+        .groupBy(new Fields("follower"))
+        .aggregate(new One(), new Fields("one"))
+        .parallelismHint(20)
+        .aggregate(new Count(), new Fields("reach"));
+```
+
+The topology creates TridentState objects representing each external database using the newStaticState method. These can then be queried in the topology. Like all sources of state, queries to these databases will be automatically batched for maximum efficiency.
+
+The topology definition is straightforward – it's just a simple batch processing job. First, the urlToTweeters database is queried to get the list of people who tweeted the URL for this request. That returns a list, so the ExpandList function is invoked to create a tuple for each tweeter.
+
+Next, the followers for each tweeter must be fetched. It's important that this step be parallelized, so shuffle is invoked to evenly distribute the tweeters among all workers for the topology. Then, the followers database is queried to get the list of followers for each tweeter. You can see that this portion of the topology is given a large parallelism since this is the most intense portion of the computation.
+
+Next, the set of followers is uniqued and counted. This is done in two steps. First a "group by" is done on the batch by "follower", running the "One" aggregator on each group. The "One" aggregator simply emits a single tuple containing the number one for each group. Then, the ones are summed together to get the unique count of the followers set. Here's the definition of the "One" aggregator:
+
+```java
+public class One implements CombinerAggregator<Integer> {
+    public Integer init(TridentTuple tuple) {
+        return 1;
+    }
+
+    public Integer combine(Integer val1, Integer val2) {
+        return 1;
+    }
+
+    public Integer zero() {
+        return 1;
+    }
+}
+```
+
+This is a "combiner aggregator", which knows how to do partial aggregations before transferring tuples over the network to maximize efficiency. Sum is also defined as a combiner aggregator, so the global sum done at the end of the topology will be very efficient.
+
+Let's now look at Trident in more detail.
+
+## Fields and tuples
+
+The Trident data model is the TridentTuple, which is a named list of values. During a topology, tuples are incrementally built up through a sequence of operations.
+Operations generally take in a set of input fields and emit a set of "function fields". The input fields are used to select a subset of the tuple as input to the operation, while the "function fields" name the fields the operation emits.
+
+Consider this example. Suppose you have a stream called "stream" that contains the fields "x", "y", and "z". To run a filter MyFilter that takes in "y" as input, you would say:
+
+```java
+stream.each(new Fields("y"), new MyFilter())
+```
+
+Suppose the implementation of MyFilter is this:
+
+```java
+public class MyFilter extends BaseFilter {
+    public boolean isKeep(TridentTuple tuple) {
+        return tuple.getInteger(0) < 10;
+    }
+}
+```
+
+This will keep all tuples whose "y" field is less than 10. The TridentTuple given as input to MyFilter will only contain the "y" field. Note that Trident is able to project a subset of a tuple extremely efficiently when selecting the input fields: the projection is essentially free.
+
+Let's now look at how "function fields" work. Suppose you had this function:
+
+```java
+public class AddAndMultiply extends BaseFunction {
+    public void execute(TridentTuple tuple, TridentCollector collector) {
+        int i1 = tuple.getInteger(0);
+        int i2 = tuple.getInteger(1);
+        collector.emit(new Values(i1 + i2, i1 * i2));
+    }
+}
+```
+
+This function takes two numbers as input and emits two new values: the addition of the numbers and the multiplication of the numbers. Suppose you had a stream with the fields "x", "y", and "z". You would use this function like this:
+
+```java
+stream.each(new Fields("x", "y"), new AddAndMultiply(), new Fields("added", "multiplied"));
+```
+
+The output of functions is additive: the fields are added to the input tuple. So the output of this each call would contain tuples with the five fields "x", "y", "z", "added", and "multiplied". "added" corresponds to the first value emitted by AddAndMultiply, while "multiplied" corresponds to the second value.
+
+With aggregators, on the other hand, the function fields replace the input tuples. So if you had a stream containing the fields "val1" and "val2", and you did this:
+
+```java
+stream.aggregate(new Fields("val2"), new Sum(), new Fields("sum"))
+```
+
+The output stream would only contain a single tuple with a single field called "sum", representing the sum of all "val2" fields in that batch.
+
+With grouped streams, the output will contain the grouping fields followed by the fields emitted by the aggregator. For example:
+
+```java
+stream.groupBy(new Fields("val1"))
+      .aggregate(new Fields("val2"), new Sum(), new Fields("sum"))
+```
+
+In this example, the output will contain the fields "val1" and "sum".
+
+## State
+
+A key problem to solve with realtime computation is how to manage state so that updates are idempotent in the face of failures and retries. It's impossible to eliminate failures, so when a node dies or something else goes wrong, batches need to be retried. The question is – how do you do state updates (whether external databases or state internal to the topology) so that it's like each message was processed only once?
+
+This is a tricky problem, and can be illustrated with the following example. Suppose that you're doing a count aggregation of your stream and want to store the running count in a database. If you store only the count in the database and it's time to apply a state update for a batch, there's no way to know if you applied that state update before.
+The batch could have been attempted before, succeeded in updating the database, and then failed at a later step. Or the batch could have been attempted before and failed to update the database. You just don't know.
+
+Trident solves this problem by doing two things:
+
+1. Each batch is given a unique id called the "transaction id". If a batch is retried it will have the exact same transaction id.
+2. State updates are ordered among batches. That is, the state updates for batch 3 won't be applied until the state updates for batch 2 have succeeded.
+
+With these two primitives, you can achieve exactly-once semantics with your state updates. Rather than store just the count in the database, what you can do instead is store the transaction id with the count in the database as an atomic value. Then, when updating the count, you can just compare the transaction id in the database with the transaction id for the current batch. If they're the same, you skip the update – because of the strong ordering, you know for sure that the value in the database incorporates the current batch. If they're different, you increment the count.
+
+Of course, you don't have to do this logic manually in your topologies. This logic is wrapped by the State abstraction and done automatically. Nor is your State object required to implement the transaction id trick: if you don't want to pay the cost of storing the transaction id in the database, you don't have to. In that case the State will have at-least-once processing semantics in the case of failures (which may be fine for your application). You can read more about how to implement a State and the various fault-tolerance tradeoffs possible [in this doc](Trident-state.html).
+
+A State is allowed to use whatever strategy it wants to store state. So it could store state in an external database or it could keep the state in-memory but backed by HDFS (like how HBase works). State's are not required to hold onto state forever. For example, you could have an in-memory State implementation that only keeps the last X hours of data available and drops anything older. Take a look at the implementation of the [Memcached integration](https://github.com/nathanmarz/trident-memcached/blob/master/src/jvm/trident/memcached/MemcachedState.java) for an example State implementation.
+
+## Execution of Trident topologies
+
+Trident topologies compile down into as efficient a Storm topology as possible. Tuples are only sent over the network when a repartitioning of the data is required, such as if you do a groupBy or a shuffle. So if you had this Trident topology:
+
+![Compiling Trident to Storm 1](images/trident-to-storm1.png)
+
+It would compile into Storm spouts/bolts like this:
+
+![Compiling Trident to Storm 2](images/trident-to-storm2.png)
+
+## Conclusion
+
+Trident makes realtime computation elegant. You've seen how high throughput stream processing, state manipulation, and low-latency querying can be seamlessly intermixed via Trident's API. Trident lets you express your realtime computations in a natural way while still getting maximal performance.
diff --git a/docs/Troubleshooting.md b/docs/Troubleshooting.md
new file mode 100644
index 00000000000..0bafa4171ff
--- /dev/null
+++ b/docs/Troubleshooting.md
@@ -0,0 +1,148 @@
+---
+title: Troubleshooting
+layout: documentation
+documentation: true
+---
+
+This page lists issues people have run into when using Storm along with their solutions.
+
+### Worker processes are crashing on startup with no stack trace
+
+Possible symptoms:
+
+ * Topologies work with one node, but workers crash with multiple nodes
+
+Solutions:
+
+ * You may have a misconfigured subnet, where nodes can't locate other nodes based on their hostname. ZeroMQ sometimes crashes the process when it can't resolve a host. There are two solutions:
+   * Make a mapping from hostname to IP address in /etc/hosts
+   * Set up an internal DNS so that nodes can locate each other based on hostname.
+
+### Nodes are unable to communicate with each other
+
+Possible symptoms:
+
+ * Every spout tuple is failing
+ * Processing is not working
+
+Solutions:
+
+ * Storm doesn't work with IPv6. You can force IPv4 by adding `-Djava.net.preferIPv4Stack=true` to the supervisor child options and restarting the supervisor.
+ * You may have a misconfigured subnet. See the solutions for `Worker processes are crashing on startup with no stack trace`
+
+### Topology stops processing tuples after a while
+
+Symptoms:
+
+ * Processing works fine for a while, and then suddenly stops and spout tuples start failing en masse.
+
+Solutions:
+
+ * This is a known issue with ZeroMQ 2.1.10. Downgrade to ZeroMQ 2.1.7.
+
+### Not all supervisors appear in Storm UI
+
+Symptoms:
+
+ * Some supervisor processes are missing from the Storm UI
+ * List of supervisors in Storm UI changes on refreshes
+
+Solutions:
+
+ * Make sure the supervisor local dirs are independent (e.g., not sharing a local dir over NFS)
+ * Try deleting the local dirs for the supervisors and restarting the daemons. Supervisors create a unique id for themselves and store it locally. When that id is copied to other nodes, Storm gets confused.
+
+### "Multiple defaults.yaml found" error
+
+Symptoms:
+
+ * When deploying a topology with "storm jar", you get this error
+
+Solution:
+
+ * You're most likely including the Storm jars inside your topology jar. When packaging your topology jar, don't include the Storm jars, as Storm will put those on the classpath for you.
+
+### "NoSuchMethodError" when running storm jar
+
+Symptoms:
+
+ * When running storm jar, you get a cryptic "NoSuchMethodError"
+
+Solution:
+
+ * You're deploying your topology with a different version of Storm than you built your topology against. Make sure the storm client you use comes from the same version as the version you compiled your topology against.
+ + +### Kryo ConcurrentModificationException + +Symptoms: + + * At runtime, you get a stack trace like the following: + +``` +java.lang.RuntimeException: java.util.ConcurrentModificationException + at org.apache.storm.utils.DisruptorQueue.consumeBatchToCursor(DisruptorQueue.java:84) + at org.apache.storm.utils.DisruptorQueue.consumeBatchWhenAvailable(DisruptorQueue.java:55) + at org.apache.storm.disruptor$consume_batch_when_available.invoke(disruptor.clj:56) + at org.apache.storm.disruptor$consume_loop_STAR_$fn__1597.invoke(disruptor.clj:67) + at org.apache.storm.util$async_loop$fn__465.invoke(util.clj:377) + at clojure.lang.AFn.run(AFn.java:24) + at java.lang.Thread.run(Thread.java:679) +Caused by: java.util.ConcurrentModificationException + at java.util.LinkedHashMap$LinkedHashIterator.nextEntry(LinkedHashMap.java:390) + at java.util.LinkedHashMap$EntryIterator.next(LinkedHashMap.java:409) + at java.util.LinkedHashMap$EntryIterator.next(LinkedHashMap.java:408) + at java.util.HashMap.writeObject(HashMap.java:1016) + at sun.reflect.GeneratedMethodAccessor17.invoke(Unknown Source) + at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) + at java.lang.reflect.Method.invoke(Method.java:616) + at java.io.ObjectStreamClass.invokeWriteObject(ObjectStreamClass.java:959) + at java.io.ObjectOutputStream.writeSerialData(ObjectOutputStream.java:1480) + at java.io.ObjectOutputStream.writeOrdinaryObject(ObjectOutputStream.java:1416) + at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1174) + at java.io.ObjectOutputStream.writeObject(ObjectOutputStream.java:346) + at org.apache.storm.serialization.SerializableSerializer.write(SerializableSerializer.java:21) + at com.esotericsoftware.kryo.Kryo.writeClassAndObject(Kryo.java:554) + at com.esotericsoftware.kryo.serializers.CollectionSerializer.write(CollectionSerializer.java:77) + at com.esotericsoftware.kryo.serializers.CollectionSerializer.write(CollectionSerializer.java:18) + at com.esotericsoftware.kryo.Kryo.writeObject(Kryo.java:472) + at org.apache.storm.serialization.KryoValuesSerializer.serializeInto(KryoValuesSerializer.java:27) +``` + +Solution: + + * This means that you're emitting a mutable object as an output tuple. Everything you emit into the output collector must be immutable. What's happening is that your bolt is modifying the object while it is being serialized to be sent over the network. + + +### Nimbus JVM shuts down right after start up + +Symptoms: + +* When starting storm nimbus, it shuts down straight away with only this logged: + +``` +2024-01-05 18:54:20.404 [o.a.s.v.ConfigValidation] INFO: Will use [class org.apache.storm.DaemonConfig, class org.apache.storm.Config] for validation +2024-01-05 18:54:20.556 [o.a.s.z.AclEnforcement] INFO: SECURITY IS DISABLED NO FURTHER CHECKS... 
+2024-01-05 18:54:20.740 [o.a.s.m.r.RocksDbStore] INFO: Opening RocksDB from /storm_rocks, storm.metricstore.rocksdb.create_if_missing=true
+```
+
+* And the JVM exits with an "EXCEPTION_ILLEGAL_INSTRUCTION" like this:
+
+```
+#
+# A fatal error has been detected by the Java Runtime Environment:
+#
+# EXCEPTION_ILLEGAL_INSTRUCTION (0xc000001d) at pc=0x00007ff94dc7a56d, pid=12728, tid=0x0000000000001d94
+#
+# JRE version: OpenJDK Runtime Environment (8.0_232) (build 1.8.0_232-09)
+# Java VM: OpenJDK 64-Bit Server VM (25.232-b09 mixed mode windows-amd64 compressed oops)
+# Problematic frame:
+# C [librocksdbjni4887247215762585789.dll+0x53a56d]
+```
+
+* And you're running on a pre-Haswell Intel or pre-Excavator AMD CPU.
+
+Solution:
+
+* Since version 7.0.4, rocksdb-jni from the Maven repository is built for modern CPUs to take advantage of [newer instructions](https://en.wikipedia.org/wiki/X86_Bit_manipulation_instruction_set#BMI2_(Bit_Manipulation_Instruction_Set_2)) for improved performance. Downgrade to version 6.29.5 to resolve this issue.
+* Alternatively, recompile rocksdb-jni with PORTABLE=1 as described in [INSTALL.md](https://github.com/facebook/rocksdb/blob/master/INSTALL.md), linked from the "Compiling from Source" section of https://github.com/facebook/rocksdb/wiki/RocksJava-Basics.
diff --git a/docs/Tutorial.md b/docs/Tutorial.md
new file mode 100644
index 00000000000..c4ebffaec44
--- /dev/null
+++ b/docs/Tutorial.md
@@ -0,0 +1,293 @@
+---
+title: Tutorial
+layout: documentation
+documentation: true
+---
+In this tutorial, you'll learn how to create Storm topologies and deploy them to a Storm cluster. Java will be the main language used, but a few examples will use Python to illustrate Storm's multi-language capabilities.
+
+## Preliminaries
+
+This tutorial uses examples from the [storm-starter]({{page.git-blob-base}}/examples/storm-starter) project. It's recommended that you clone the project and follow along with the examples. Read [Setting up a development environment](Setting-up-development-environment.html) and [Creating a new Storm project](Creating-a-new-Storm-project.html) to get your machine set up.
+
+## Components of a Storm cluster
+
+A Storm cluster is superficially similar to a Hadoop cluster. Whereas on Hadoop you run "MapReduce jobs", on Storm you run "topologies". "Jobs" and "topologies" themselves are very different -- one key difference is that a MapReduce job eventually finishes, whereas a topology processes messages forever (or until you kill it).
+
+There are two kinds of nodes on a Storm cluster: the master node and the worker nodes. The master node runs a daemon called "Nimbus" that is similar to Hadoop's "JobTracker". Nimbus is responsible for distributing code around the cluster, assigning tasks to machines, and monitoring for failures.
+
+Each worker node runs a daemon called the "Supervisor". The supervisor listens for work assigned to its machine and starts and stops worker processes as necessary based on what Nimbus has assigned to it. Each worker process executes a subset of a topology; a running topology consists of many worker processes spread across many machines.
+
+![Storm cluster](images/storm-cluster.png)
+
+All coordination between Nimbus and the Supervisors is done through a [Zookeeper](http://zookeeper.apache.org/) cluster. Additionally, the Nimbus daemon and Supervisor daemons are fail-fast and stateless; all state is kept in Zookeeper or on a local disk.
+This means you can kill -9 Nimbus or the Supervisors and they'll start back up as if nothing happened. This design leads to Storm clusters being incredibly stable.
+
+## Topologies
+
+To do realtime computation on Storm, you create what are called "topologies". A topology is a graph of computation. Each node in a topology contains processing logic, and links between nodes indicate how data should be passed around between nodes.
+
+Running a topology is straightforward. First, you package all your code and dependencies into a single jar. Then, you run a command like the following:
+
+```
+storm jar all-my-code.jar org.apache.storm.MyTopology arg1 arg2
+```
+
+This runs the class `org.apache.storm.MyTopology` with the arguments `arg1` and `arg2`. The main function of the class defines the topology and submits it to Nimbus. The `storm jar` part takes care of connecting to Nimbus and uploading the jar.
+
+Since topology definitions are just Thrift structs, and Nimbus is a Thrift service, you can create and submit topologies using any programming language. The above example is the easiest way to do it from a JVM-based language. See [Running topologies on a production cluster](Running-topologies-on-a-production-cluster.html) for more information on starting and stopping topologies.
+
+## Streams
+
+The core abstraction in Storm is the "stream". A stream is an unbounded sequence of tuples. Storm provides the primitives for transforming a stream into a new stream in a distributed and reliable way. For example, you may transform a stream of tweets into a stream of trending topics.
+
+The basic primitives Storm provides for doing stream transformations are "spouts" and "bolts". Spouts and bolts have interfaces that you implement to run your application-specific logic.
+
+A spout is a source of streams. For example, a spout may read tuples off of a [Kestrel](http://github.com/nathanmarz/storm-kestrel) queue and emit them as a stream. Or a spout may connect to the Twitter API and emit a stream of tweets.
+
+A bolt consumes any number of input streams, does some processing, and possibly emits new streams. Complex stream transformations, like computing a stream of trending topics from a stream of tweets, require multiple steps and thus multiple bolts. Bolts can run functions, filter tuples, do streaming aggregations, do streaming joins, talk to databases, and more.
+
+Networks of spouts and bolts are packaged into a "topology" which is the top-level abstraction that you submit to Storm clusters for execution. A topology is a graph of stream transformations where each node is a spout or bolt. Edges in the graph indicate which bolts are subscribing to which streams. When a spout or bolt emits a tuple to a stream, it sends the tuple to every bolt that subscribed to that stream.
+
+![A Storm topology](images/topology.png)
+
+Links between nodes in your topology indicate how tuples should be passed around. For example, if there is a link between Spout A and Bolt B, a link from Spout A to Bolt C, and a link from Bolt B to Bolt C, then every time Spout A emits a tuple, it will send the tuple to both Bolt B and Bolt C. All of Bolt B's output tuples will go to Bolt C as well.
+
+Each node in a Storm topology executes in parallel. In your topology, you can specify how much parallelism you want for each node, and then Storm will spawn that number of threads across the cluster to do the execution.
+
+A topology runs forever, or until you kill it. Storm will automatically reassign any failed tasks.
+
+## Streams
+
+The core abstraction in Storm is the "stream". A stream is an unbounded sequence of tuples. Storm provides the primitives for transforming a stream into a new stream in a distributed and reliable way. For example, you may transform a stream of tweets into a stream of trending topics.
+
+The basic primitives Storm provides for doing stream transformations are "spouts" and "bolts". Spouts and bolts have interfaces that you implement to run your application-specific logic.
+
+A spout is a source of streams. For example, a spout may read tuples off of a [Kestrel](http://github.com/nathanmarz/storm-kestrel) queue and emit them as a stream. Or a spout may connect to the Twitter API and emit a stream of tweets.
+
+A bolt consumes any number of input streams, does some processing, and possibly emits new streams. Complex stream transformations, like computing a stream of trending topics from a stream of tweets, require multiple steps and thus multiple bolts. Bolts can do anything from running functions and filtering tuples to doing streaming aggregations, streaming joins, talking to databases, and more.
+
+Networks of spouts and bolts are packaged into a "topology" which is the top-level abstraction that you submit to Storm clusters for execution. A topology is a graph of stream transformations where each node is a spout or bolt. Edges in the graph indicate which bolts are subscribing to which streams. When a spout or bolt emits a tuple to a stream, it sends the tuple to every bolt that subscribed to that stream.
+
+![A Storm topology](images/topology.png)
+
+Links between nodes in your topology indicate how tuples should be passed around. For example, if there is a link between Spout A and Bolt B, a link from Spout A to Bolt C, and a link from Bolt B to Bolt C, then every time Spout A emits a tuple, it will send the tuple to both Bolt B and Bolt C. All of Bolt B's output tuples will go to Bolt C as well.
+
+Each node in a Storm topology executes in parallel. In your topology, you can specify how much parallelism you want for each node, and then Storm will spawn that number of threads across the cluster to do the execution.
+
+A topology runs forever, or until you kill it. Storm will automatically reassign any failed tasks. Additionally, Storm guarantees that there will be no data loss, even if machines go down and messages are dropped.
+
+## Data model
+
+Storm uses tuples as its data model. A tuple is a named list of values, and a field in a tuple can be an object of any type. Out of the box, Storm supports all the primitive types, strings, and byte arrays as tuple field values. To use an object of another type, you just need to implement [a serializer](Serialization.html) for the type.
+
+Every node in a topology must declare the output fields for the tuples it emits. For example, this bolt declares that it emits 2-tuples with the fields "double" and "triple":
+
+```java
+public class DoubleAndTripleBolt extends BaseRichBolt {
+    private OutputCollectorBase _collector;
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, OutputCollectorBase collector) {
+        _collector = collector;
+    }
+
+    @Override
+    public void execute(Tuple input) {
+        int val = input.getInteger(0);
+        _collector.emit(input, new Values(val*2, val*3));
+        _collector.ack(input);
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("double", "triple"));
+    }
+}
+```
+
+The `declareOutputFields` function declares the output fields `["double", "triple"]` for the component. The rest of the bolt will be explained in the upcoming sections.
+
+## A simple topology
+
+Let's take a look at a simple topology to explore the concepts more and see how the code shapes up. Let's look at the `ExclamationTopology` definition from storm-starter:
+
+```java
+TopologyBuilder builder = new TopologyBuilder();
+builder.setSpout("words", new TestWordSpout(), 10);
+builder.setBolt("exclaim1", new ExclamationBolt(), 3)
+        .shuffleGrouping("words");
+builder.setBolt("exclaim2", new ExclamationBolt(), 2)
+        .shuffleGrouping("exclaim1");
+```
+
+This topology contains a spout and two bolts. The spout emits words, and each bolt appends the string "!!!" to its input. The nodes are arranged in a line: the spout emits to the first bolt which then emits to the second bolt. If the spout emits the tuples ["bob"] and ["john"], then the second bolt will emit the words ["bob!!!!!!"] and ["john!!!!!!"].
+
+This code defines the nodes using the `setSpout` and `setBolt` methods. These methods take as input a user-specified id, an object containing the processing logic, and the amount of parallelism you want for the node. In this example, the spout is given id "words" and the bolts are given ids "exclaim1" and "exclaim2".
+
+The object containing the processing logic implements the [IRichSpout](javadocs/org/apache/storm/topology/IRichSpout.html) interface for spouts and the [IRichBolt](javadocs/org/apache/storm/topology/IRichBolt.html) interface for bolts.
+
+The last parameter, how much parallelism you want for the node, is optional. It indicates how many threads should execute that component across the cluster. If you omit it, Storm will only allocate one thread for that node.
+
+`setBolt` returns an [InputDeclarer](javadocs/org/apache/storm/topology/InputDeclarer.html) object that is used to define the inputs to the Bolt. Here, component "exclaim1" declares that it wants to read all the tuples emitted by component "words" using a shuffle grouping, and component "exclaim2" declares that it wants to read all the tuples emitted by component "exclaim1" using a shuffle grouping. "shuffle grouping" means that tuples should be randomly distributed from the input tasks to the bolt's tasks.
+There are many ways to group data between components. These will be explained in a few sections.
+
+If you wanted component "exclaim2" to read all the tuples emitted by both component "words" and component "exclaim1", you would write component "exclaim2"'s definition like this:
+
+```java
+builder.setBolt("exclaim2", new ExclamationBolt(), 5)
+            .shuffleGrouping("words")
+            .shuffleGrouping("exclaim1");
+```
+
+As you can see, input declarations can be chained to specify multiple sources for the Bolt.
+
+Let's dig into the implementations of the spouts and bolts in this topology. Spouts are responsible for emitting new messages into the topology. `TestWordSpout` in this topology emits a random word from the list ["nathan", "mike", "jackson", "golda", "bertels"] as a 1-tuple every 100ms. The implementation of `nextTuple()` in TestWordSpout looks like this:
+
+```java
+public void nextTuple() {
+    Utils.sleep(100);
+    final String[] words = new String[] {"nathan", "mike", "jackson", "golda", "bertels"};
+    final Random rand = new Random();
+    final String word = words[rand.nextInt(words.length)];
+    _collector.emit(new Values(word));
+}
+```
+
+As you can see, the implementation is very straightforward.
+
+`ExclamationBolt` appends the string "!!!" to its input. Let's take a look at the full implementation for `ExclamationBolt`:
+
+```java
+public static class ExclamationBolt implements IRichBolt {
+    OutputCollector _collector;
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+        _collector = collector;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+        _collector.emit(tuple, new Values(tuple.getString(0) + "!!!"));
+        _collector.ack(tuple);
+    }
+
+    @Override
+    public void cleanup() {
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("word"));
+    }
+
+    @Override
+    public Map getComponentConfiguration() {
+        return null;
+    }
+}
+```
+
+The `prepare` method provides the bolt with an `OutputCollector` that is used for emitting tuples from this bolt. Tuples can be emitted at any time from the bolt -- in the `prepare`, `execute`, or `cleanup` methods, or even asynchronously in another thread. This `prepare` implementation simply saves the `OutputCollector` as an instance variable to be used later on in the `execute` method.
+
+The `execute` method receives a tuple from one of the bolt's inputs. The `ExclamationBolt` grabs the first field from the tuple and emits a new tuple with the string "!!!" appended to it. If you implement a bolt that subscribes to multiple input sources, you can find out which component the [Tuple](javadocs/org/apache/storm/tuple/Tuple.html) came from by using the `Tuple#getSourceComponent` method.
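+
+For illustration, a bolt that subscribes to both the "words" spout and the "exclaim1" bolt from the topology above might dispatch on the source component like this (a sketch, not part of storm-starter):
+
+```java
+@Override
+public void execute(Tuple tuple) {
+    // getSourceComponent() returns the id of the component that emitted this tuple.
+    if ("words".equals(tuple.getSourceComponent())) {
+        // handle raw words coming straight from the spout
+    } else if ("exclaim1".equals(tuple.getSourceComponent())) {
+        // handle words that have already passed through the first bolt
+    }
+}
+```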
+Returning to `ExclamationBolt`: there are a few other things going on in the `execute` method, namely that the input tuple is passed as the first argument to `emit` and the input tuple is acked on the final line. These are part of Storm's reliability API for guaranteeing no data loss and will be explained later in this tutorial.
+
+The `cleanup` method is called when a Bolt is being shut down and should clean up any resources that were opened. There's no guarantee that this method will be called on the cluster: for example, if the machine the task is running on blows up, there's no way to invoke the method. The `cleanup` method is intended for when you run topologies in [local mode](Local-mode.html) (where a Storm cluster is simulated in a process), and you want to be able to run and kill many topologies without suffering any resource leaks.
+
+The `declareOutputFields` method declares that the `ExclamationBolt` emits 1-tuples with one field called "word".
+
+The `getComponentConfiguration` method allows you to configure various aspects of how this component runs. This is a more advanced topic that is explained further in [Configuration](Configuration.html).
+
+Methods like `cleanup` and `getComponentConfiguration` are often not needed in a bolt implementation. You can define bolts more succinctly by using a base class that provides default implementations where appropriate. `ExclamationBolt` can be written more succinctly by extending `BaseRichBolt`, like so:
+
+```java
+public static class ExclamationBolt extends BaseRichBolt {
+    OutputCollector _collector;
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+        _collector = collector;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+        _collector.emit(tuple, new Values(tuple.getString(0) + "!!!"));
+        _collector.ack(tuple);
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("word"));
+    }
+}
+```
+
+## Running ExclamationTopology in local mode
+
+Let's see how to run the `ExclamationTopology` in local mode and see that it's working.
+
+Storm has two modes of operation: local mode and distributed mode. In local mode, Storm executes completely in a process by simulating worker nodes with threads. Local mode is useful for testing and development of topologies. You can read more about running topologies in local mode on [Local mode](Local-mode.html).
+
+To run a topology in local mode, run the command `storm local` instead of `storm jar`.
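+
+Alternatively, you can run a topology in-process from Java using the `LocalCluster` API described in [Local mode](Local-mode.html). Here is an illustrative sketch for the `ExclamationTopology` built above; the topology name and the ten-second run time are arbitrary choices:
+
+```java
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.testing.TestWordSpout;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.utils.Utils;
+
+public class ExclamationLocalRunner {
+    public static void main(String[] args) throws Exception {
+        TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout("words", new TestWordSpout(), 10);
+        builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("words");
+
+        // LocalCluster simulates a Storm cluster with threads inside this process.
+        try (LocalCluster cluster = new LocalCluster()) {
+            cluster.submitTopology("test", new Config(), builder.createTopology());
+            Utils.sleep(10000); // let the topology run for ten seconds
+        } // closing the cluster shuts down the simulated topology
+    }
+}
+```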
+
+## Stream groupings
+
+A stream grouping tells a topology how to send tuples between two components. Remember, spouts and bolts execute in parallel as many tasks across the cluster. If you look at how a topology is executing at the task level, it looks something like this:
+
+![Tasks in a topology](images/topology-tasks.png)
+
+When a task for Bolt A emits a tuple to Bolt B, which task should it send the tuple to?
+
+A "stream grouping" answers this question by telling Storm how to send tuples between sets of tasks. Before we dig into the different kinds of stream groupings, let's take a look at another topology from [storm-starter]({{page.git-blob-base}}/examples/storm-starter). This [WordCountTopology]({{page.git-blob-base}}/examples/storm-starter/src/jvm/org/apache/storm/starter/WordCountTopology.java) reads sentences off of a spout and streams out of the `WordCount` bolt the running count of each word it has seen:
+
+```java
+TopologyBuilder builder = new TopologyBuilder();
+
+builder.setSpout("sentences", new RandomSentenceSpout(), 5);
+builder.setBolt("split", new SplitSentence(), 8)
+        .shuffleGrouping("sentences");
+builder.setBolt("count", new WordCount(), 12)
+        .fieldsGrouping("split", new Fields("word"));
+```
+
+`SplitSentence` emits a tuple for each word in each sentence it receives, and `WordCount` keeps a map in memory from word to count. Each time `WordCount` receives a word, it updates its state and emits the new word count.
+
+There are a few different kinds of stream groupings.
+
+The simplest kind of grouping is called a "shuffle grouping" which sends the tuple to a random task. A shuffle grouping is used in the `WordCountTopology` to send tuples from `RandomSentenceSpout` to the `SplitSentence` bolt. It has the effect of evenly distributing the work of processing the tuples across all of the `SplitSentence` bolt's tasks.
+
+A more interesting kind of grouping is the "fields grouping". A fields grouping is used between the `SplitSentence` bolt and the `WordCount` bolt. It is critical for the functioning of the `WordCount` bolt that the same word always goes to the same task. Otherwise, more than one task will see the same word, and they'll each emit incorrect values for the count since each has incomplete information. A fields grouping lets you group a stream by a subset of its fields. This causes equal values for that subset of fields to go to the same task. Since `WordCount` subscribes to `SplitSentence`'s output stream using a fields grouping on the "word" field, the same word always goes to the same task and the bolt produces the correct output.
+
+Fields groupings are the basis of implementing streaming joins and streaming aggregations as well as a plethora of other use cases. Underneath the hood, fields groupings are implemented using mod hashing.
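+
+Conceptually, the task selection works like the following sketch (an illustration of the idea, not Storm's actual internal code):
+
+```java
+// A fields grouping maps the values of the grouped fields to a consumer task
+// index. Equal values always hash to the same index, so they reach the same task.
+int chooseTask(List<Object> groupedFieldValues, int numTasks) {
+    return Math.floorMod(groupedFieldValues.hashCode(), numTasks);
+}
+```
+
+Because the hash of the grouped values is stable, every tuple carrying the word "cat", for example, lands on the same `WordCount` task.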
+There are a few other kinds of stream groupings. You can read more about them on [Concepts](Concepts.html).
+
+## Defining Bolts in other languages
+
+Bolts can be defined in any language. Bolts written in another language are executed as subprocesses, and Storm communicates with those subprocesses with JSON messages over stdin/stdout. The communication protocol just requires an ~100 line adapter library, and Storm ships with adapter libraries for Ruby, Python, and Fancy.
+
+Here's the definition of the `SplitSentence` bolt from `WordCountTopology`:
+
+```java
+public static class SplitSentence extends ShellBolt implements IRichBolt {
+    public SplitSentence() {
+        super("python3", "splitsentence.py");
+    }
+
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("word"));
+    }
+}
+```
+
+`SplitSentence` extends `ShellBolt` and declares that it runs using `python3` with the argument `splitsentence.py`. Here's the implementation of `splitsentence.py`:
+
+```python
+import storm
+
+class SplitSentenceBolt(storm.BasicBolt):
+    def process(self, tup):
+        words = tup.values[0].split(" ")
+        for word in words:
+            storm.emit([word])
+
+SplitSentenceBolt().run()
+```
+
+For more information on writing spouts and bolts in other languages, and to learn about how to create topologies in other languages (and avoid the JVM completely), see [Using non-JVM languages with Storm](Using-non-JVM-languages-with-Storm.html).
+
+## Guaranteeing message processing
+
+Earlier on in this tutorial, we skipped over a few aspects of how tuples are emitted. Those aspects were part of Storm's reliability API: how Storm guarantees that every message coming off a spout will be fully processed. See [Guaranteeing message processing](Guaranteeing-message-processing.html) for information on how this works and what you have to do as a user to take advantage of Storm's reliability capabilities.
+
+## Trident
+
+Storm guarantees that every message will be played through the topology at least once. A common question asked is "how do you do things like counting on top of Storm? Won't you overcount?" Storm has a higher level API called Trident that lets you achieve exactly-once messaging semantics for most computations. Read more about Trident [here](Trident-tutorial.html).
+
+## Distributed RPC
+
+This tutorial showed how to do basic stream processing on top of Storm. There are lots more things you can do with Storm's primitives. One of the most interesting applications of Storm is Distributed RPC, where you parallelize the computation of intense functions on the fly. Read more about Distributed RPC [here](Distributed-RPC.html).
+
+## Conclusion
+
+This tutorial gave a broad overview of developing, testing, and deploying Storm topologies. The rest of the documentation dives deeper into all the aspects of using Storm.
diff --git a/docs/Understanding-the-parallelism-of-a-Storm-topology.md b/docs/Understanding-the-parallelism-of-a-Storm-topology.md
new file mode 100644
index 00000000000..c48102de22e
--- /dev/null
+++ b/docs/Understanding-the-parallelism-of-a-Storm-topology.md
@@ -0,0 +1,123 @@
+---
+title: Understanding the Parallelism of a Storm Topology
+layout: documentation
+documentation: true
+---
+## What makes a running topology: worker processes, executors and tasks
+
+Storm distinguishes between the following three main entities that are used to actually run a topology in a Storm cluster:
+
+1. Worker processes
+2. Executors (threads)
+3. Tasks
+
+Here is a simple illustration of their relationships:
+
+![The relationships of worker processes, executors (threads) and tasks in Storm](images/relationships-worker-processes-executors-tasks.png)
+
+A _worker process_ executes a subset of a topology. A worker process belongs to a specific topology and may run one or more executors for one or more components (spouts or bolts) of this topology. A running topology consists of many such processes running on many machines within a Storm cluster.
+
+An _executor_ is a thread that is spawned by a worker process. It may run one or more tasks for the same component (spout or bolt).
+
+A _task_ performs the actual data processing — each spout or bolt that you implement in your code executes as many tasks across the cluster. The number of tasks for a component is always the same throughout the lifetime of a topology, but the number of executors (threads) for a component can change over time. This means that the following condition holds true: ``#threads ≤ #tasks``. By default, the number of tasks is set to be the same as the number of executors, i.e. Storm will run one task per thread.
+
+## Configuring the parallelism of a topology
+
+Note that in Storm’s terminology "parallelism" is specifically used to describe the so-called _parallelism hint_, which means the initial number of executors (threads) of a component. In this document though we use the term "parallelism" in a more general sense to describe how you can configure not only the number of executors but also the number of worker processes and the number of tasks of a Storm topology. We will specifically call out when "parallelism" is used in the normal, narrow definition of Storm.
+
+The following sections give an overview of the various configuration options and how to set them in your code. There is more than one way of setting these options though, and the sections below list only some of them. Storm currently has the following [order of precedence for configuration settings](Configuration.html): ``defaults.yaml`` < ``storm.yaml`` < topology-specific configuration < internal component-specific configuration < external component-specific configuration.
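+
+As an illustration of a topology-specific setting versus an external component-specific setting (the debug flag here is just an example):
+
+```java
+Config conf = new Config();
+conf.setDebug(false); // topology-specific configuration: applies to the whole topology
+
+topologyBuilder.setBolt("green-bolt", new GreenBolt(), 2)
+    // external component-specific configuration: overrides the
+    // topology-wide value, but only for this component
+    .addConfiguration(Config.TOPOLOGY_DEBUG, true);
+```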
+
+### Number of worker processes
+
+* Description: How many worker processes to create _for the topology_ across machines in the cluster.
+* Configuration option: [TOPOLOGY_WORKERS](javadocs/org/apache/storm/Config.html#TOPOLOGY_WORKERS)
+* How to set in your code (examples):
+    * [Config#setNumWorkers](javadocs/org/apache/storm/Config.html)
+
+### Number of executors (threads)
+
+* Description: How many executors to spawn _per component_.
+* Configuration option: None (pass ``parallelism_hint`` parameter to ``setSpout`` or ``setBolt``)
+* How to set in your code (examples):
+    * [TopologyBuilder#setSpout()](javadocs/org/apache/storm/topology/TopologyBuilder.html)
+    * [TopologyBuilder#setBolt()](javadocs/org/apache/storm/topology/TopologyBuilder.html)
+    * Note that as of Storm 0.8 the ``parallelism_hint`` parameter now specifies the initial number of executors (not tasks!) for that bolt.
+
+### Number of tasks
+
+* Description: How many tasks to create _per component_.
+* Configuration option: [TOPOLOGY_TASKS](javadocs/org/apache/storm/Config.html#TOPOLOGY_TASKS)
+* How to set in your code (examples):
+    * [ComponentConfigurationDeclarer#setNumTasks()](javadocs/org/apache/storm/topology/ComponentConfigurationDeclarer.html)
+
+
+Here is an example code snippet to show these settings in practice:
+
+```java
+topologyBuilder.setBolt("green-bolt", new GreenBolt(), 2)
+               .setNumTasks(4)
+               .shuffleGrouping("blue-spout");
+```
+
+In the above code we configured Storm to run the bolt ``GreenBolt`` with an initial number of two executors and four associated tasks. Storm will run two tasks per executor (thread). If you do not explicitly configure the number of tasks, Storm will run by default one task per executor.
+
+## Example of a running topology
+
+The following illustration shows what a simple topology looks like in operation. The topology consists of three components: one spout called ``BlueSpout`` and two bolts called ``GreenBolt`` and ``YellowBolt``. The components are linked such that ``BlueSpout`` sends its output to ``GreenBolt``, which in turn sends its own output to ``YellowBolt``.
+
+![Example of a running topology in Storm](images/example-of-a-running-topology.png)
+
+The ``GreenBolt`` was configured as per the code snippet above whereas ``BlueSpout`` and ``YellowBolt`` only set the parallelism hint (number of executors). Here is the relevant code:
+
+```java
+Config conf = new Config();
+conf.setNumWorkers(2); // use two worker processes
+
+topologyBuilder.setSpout("blue-spout", new BlueSpout(), 2); // set parallelism hint to 2
+
+topologyBuilder.setBolt("green-bolt", new GreenBolt(), 2)
+               .setNumTasks(4)
+               .shuffleGrouping("blue-spout");
+
+topologyBuilder.setBolt("yellow-bolt", new YellowBolt(), 6)
+               .shuffleGrouping("green-bolt");
+
+StormSubmitter.submitTopology(
+        "mytopology",
+        conf,
+        topologyBuilder.createTopology()
+    );
+```
+
+And of course Storm comes with additional configuration settings to control the parallelism of a topology, including:
+
+* [TOPOLOGY_MAX_TASK_PARALLELISM](javadocs/org/apache/storm/Config.html#TOPOLOGY_MAX_TASK_PARALLELISM): This setting puts a ceiling on the number of executors that can be spawned for a single component. It is typically used during testing to limit the number of threads spawned when running a topology in local mode. You can set this option via e.g. [Config#setMaxTaskParallelism()](javadocs/org/apache/storm/Config.html#setMaxTaskParallelism(int)), as sketched below.
+
+## How to change the parallelism of a running topology
+
+A nifty feature of Storm is that you can increase or decrease the number of worker processes and/or executors without being required to restart the cluster or the topology. The act of doing so is called rebalancing.
+
+You have two options to rebalance a topology:
+
+1. Use the Storm web UI to rebalance the topology.
+2. Use the CLI tool `storm rebalance` as described below.
+
+Here is an example of using the CLI tool:
+
+```
+## Reconfigure the topology "mytopology" to use 5 worker processes,
+## the spout "blue-spout" to use 3 executors and
+## the bolt "yellow-bolt" to use 10 executors.
+
+$ storm rebalance mytopology -n 5 -e blue-spout=3 -e yellow-bolt=10
+```
+
+## References
+
+* [Concepts](Concepts.html)
+* [Configuration](Configuration.html)
+* [Running topologies on a production cluster](Running-topologies-on-a-production-cluster.html)
+* [Local mode](Local-mode.html)
+* [Tutorial](Tutorial.html)
+* [Storm API documentation](javadocs/), most notably the class ``Config``
+
diff --git a/docs/Using-non-JVM-languages-with-Storm.md b/docs/Using-non-JVM-languages-with-Storm.md
new file mode 100644
index 00000000000..da809340906
--- /dev/null
+++ b/docs/Using-non-JVM-languages-with-Storm.md
@@ -0,0 +1,53 @@
+---
+title: Using non JVM languages with Storm
+layout: documentation
+---
+- two pieces: creating topologies and implementing spouts and bolts in other languages
+- creating topologies in another language is easy since topologies are just thrift structures (link to storm.thrift)
+- implementing spouts and bolts in another language is called using "multilang components" or "shelling"
+  - Here's a specification of the protocol: [Multilang protocol](Multilang-protocol.html)
+  - the thrift structure lets you define multilang components explicitly as a program and a script (e.g., python3 and the file implementing your bolt)
+  - In Java, you extend ShellBolt or ShellSpout to create multilang components
+  - note that output field declarations happen in the thrift structure, so in Java you create multilang components like the following:
+    - declare the fields in Java, with the processing code in the other language specified in the constructor of the ShellBolt
+  - multilang uses json messages over stdin/stdout to communicate with the subprocess
+  - storm comes with Ruby, Python, and Fancy adapters that implement the protocol. show an example of Python
+    - Python supports emitting, anchoring, acking, and logging
+- "storm shell" command makes constructing the jar and uploading it to nimbus easy
+  - makes the jar and uploads it
+  - calls your program with the host/port of nimbus and the jarfile id
+
+## Notes on implementing a DSL in a non-JVM language
+
+The right place to start is src/storm.thrift. Since Storm topologies are just Thrift structures, and Nimbus is a Thrift daemon, you can create and submit topologies in any language.
+
+When you create the Thrift structs for spouts and bolts, the code for the spout or bolt is specified in the ComponentObject struct:
+
+```
+union ComponentObject {
+  1: binary serialized_java;
+  2: ShellComponent shell;
+  3: JavaObject java_object;
+}
+```
+
+For a non-JVM DSL, you would want to make use of "2" and "3".
+ShellComponent lets you specify a script to run that component (e.g., your Python code). And JavaObject lets you specify native java spouts and bolts for the component (and Storm will use reflection to create that spout or bolt).
+
+There's a "storm shell" command that will help with submitting a topology. Its usage is like this:
+
+```
+storm shell resources/ python3 topology.py arg1 arg2
+```
+
+storm shell will then package resources/ into a jar, upload the jar to Nimbus, and call your topology.py script like this:
+
+```
+python3 topology.py arg1 arg2 {nimbus-host} {nimbus-port} {uploaded-jar-location}
+```
+
+Then you can connect to Nimbus using the Thrift API and submit the topology, passing {uploaded-jar-location} into the submitTopology method. For reference, here's the submitTopology definition:
+
+```
+void submitTopology(1: string name, 2: string uploadedJarLocation, 3: string jsonConf, 4: StormTopology topology)
+    throws (1: AlreadyAliveException e, 2: InvalidTopologyException ite);
+```
diff --git a/docs/Windowing.md b/docs/Windowing.md
new file mode 100644
index 00000000000..bf625d2603a
--- /dev/null
+++ b/docs/Windowing.md
@@ -0,0 +1,376 @@
+---
+title: Windowing Support in Core Storm
+layout: documentation
+documentation: true
+---
+
+Storm core has support for processing a group of tuples that falls within a window. Windows are specified with the following two parameters:
+
+1. Window length - the length or duration of the window
+2. Sliding interval - the interval at which the window slides
+
+## Sliding Window
+
+Tuples are grouped in windows and the window slides every sliding interval. A tuple can belong to more than one window.
+
+For example, consider a time duration based sliding window with a length of 10 secs and a sliding interval of 5 secs:
+
+```
+........| e1 e2 | e3 e4 e5 e6 | e7 e8 e9 |...
+-5      0       5            10          15   -> time
+|<------- w1 -->|
+        |<---------- w2 ----->|
+                |<-------------- w3 ---->|
+```
+
+The window is evaluated every 5 seconds and some of the tuples in the first window overlap with the second one.
+
+Note: The window first slides at t = 5 secs and would contain events received up to the first five secs.
+
+## Tumbling Window
+
+Tuples are grouped in a single window based on time or count. Any tuple belongs to only one of the windows.
+
+For example, consider a time duration based tumbling window with a length of 5 secs:
+
+```
+| e1 e2 | e3 e4 e5 e6 | e7 e8 e9 |...
+0       5             10         15    -> time
+   w1         w2            w3
+```
+
+The window is evaluated every five seconds and none of the windows overlap.
+
+Storm supports specifying the window length and sliding interval as a count of the number of tuples or as a time duration.
+
+The bolt interface `IWindowedBolt` is implemented by bolts that need windowing support.
+
+```java
+public interface IWindowedBolt extends IComponent {
+    void prepare(Map stormConf, TopologyContext context, OutputCollector collector);
+    /**
+     * Process tuples falling within the window and optionally emit
+     * new tuples based on the tuples in the input window.
+     */
+    void execute(TupleWindow inputWindow);
+    void cleanup();
+}
+```
+
+Every time the window activates, the `execute` method is invoked. The `TupleWindow` parameter gives access to the current tuples in the window, the tuples that expired, and the new tuples that were added since the last window was computed, which is useful for efficient windowing computations.
+
+Bolts that need windowing support typically extend `BaseWindowedBolt`, which has the APIs for specifying the window length and sliding interval.
+
+For example:
+ +```java +public class SlidingWindowBolt extends BaseWindowedBolt { + private OutputCollector collector; + + @Override + public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) { + this.collector = collector; + } + + @Override + public void execute(TupleWindow inputWindow) { + for(Tuple tuple: inputWindow.get()) { + // do the windowing computation + ... + } + // emit the results + collector.emit(new Values(computedValue)); + } +} + +public static void main(String[] args) { + TopologyBuilder builder = new TopologyBuilder(); + builder.setSpout("spout", new RandomSentenceSpout(), 1); + builder.setBolt("slidingwindowbolt", + new SlidingWindowBolt().withWindow(new Count(30), new Count(10)), + 1).shuffleGrouping("spout"); + Config conf = new Config(); + conf.setDebug(true); + conf.setNumWorkers(1); + + StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology()); + +} +``` + +The following window configurations are supported. + +```java +withWindow(Count windowLength, Count slidingInterval) +Tuple count based sliding window that slides after `slidingInterval` number of tuples. + +withWindow(Count windowLength) +Tuple count based window that slides with every incoming tuple. + +withWindow(Count windowLength, Duration slidingInterval) +Tuple count based sliding window that slides after `slidingInterval` time duration. + +withWindow(Duration windowLength, Duration slidingInterval) +Time duration based sliding window that slides after `slidingInterval` time duration. + +withWindow(Duration windowLength) +Time duration based window that slides with every incoming tuple. + +withWindow(Duration windowLength, Count slidingInterval) +Time duration based sliding window configuration that slides after `slidingInterval` number of tuples. + +withTumblingWindow(BaseWindowedBolt.Count count) +Count based tumbling window that tumbles after the specified count of tuples. + +withTumblingWindow(BaseWindowedBolt.Duration duration) +Time duration based tumbling window that tumbles after the specified time duration. + +``` + +## Tuple timestamp and out of order tuples +By default the timestamp tracked in the window is the time when the tuple is processed by the bolt. The window calculations +are performed based on the processing timestamp. Storm has support for tracking windows based on the source generated timestamp. + +```java +/** +* Specify a field in the tuple that represents the timestamp as a long value. If this +* field is not present in the incoming tuple, an {@link IllegalArgumentException} will be thrown. +* +* @param fieldName the name of the field that contains the timestamp +*/ +public BaseWindowedBolt withTimestampField(String fieldName) +``` + +The value for the above `fieldName` will be looked up from the incoming tuple and considered for windowing calculations. +If the field is not present in the tuple an exception will be thrown. Alternatively a [TimestampExtractor](../storm-client/src/jvm/org/apache/storm/windowing/TimestampExtractor.java) can be used to +derive a timestamp value from a tuple (e.g. extract timestamp from a nested field within the tuple). + +```java +/** +* Specify the timestamp extractor implementation. 
+*
+* @param timestampExtractor the {@link TimestampExtractor} implementation
+*/
+public BaseWindowedBolt withTimestampExtractor(TimestampExtractor timestampExtractor)
+```
+
+
+Along with the timestamp field name/extractor, a time lag parameter can also be specified, which indicates the maximum time limit for tuples with out-of-order timestamps.
+
+```java
+/**
+* Specify the maximum time lag of the tuple timestamp in milliseconds. It means that the tuple timestamps
+* cannot be out of order by more than this amount.
+*
+* @param duration the max lag duration
+*/
+public BaseWindowedBolt withLag(Duration duration)
+```
+
+E.g. if the lag is 5 secs and a tuple `t1` arrives with timestamp `06:00:05`, no tuple may arrive with a timestamp earlier than `06:00:00`. If a tuple arrives with timestamp 05:59:59 after `t1` and the window has moved past `t1`, it will be treated as a late tuple. Late tuples are not processed by default, just logged in the worker log files at INFO level.
+
+```java
+/**
+ * Specify a stream id on which late tuples are going to be emitted. They are going to be accessible via the
+ * {@link org.apache.storm.topology.WindowedBoltExecutor#LATE_TUPLE_FIELD} field.
+ * It must be defined on a per-component basis, and in conjunction with the
+ * {@link BaseWindowedBolt#withTimestampField}, otherwise {@link IllegalArgumentException} will be thrown.
+ *
+ * @param streamId the name of the stream used to emit late tuples on
+ */
+public BaseWindowedBolt withLateTupleStream(String streamId)
+
+```
+This behaviour can be changed by specifying the above `streamId`. In this case late tuples are going to be emitted on the specified stream and accessible via the field `WindowedBoltExecutor.LATE_TUPLE_FIELD`.
+
+
+### Watermarks
+For processing tuples with a timestamp field, Storm internally computes watermarks based on the incoming tuple timestamps. A watermark is the minimum of the latest tuple timestamps (minus the lag) across all the input streams. At a higher level this is similar to the watermark concept used by Flink and Google's MillWheel for tracking event-based timestamps.
+
+Periodically (by default every second), the watermark timestamps are emitted, and a watermark is considered the clock tick for the window calculation if tuple-based timestamps are in use. The interval at which watermarks are emitted can be changed with the API below.
+
+```java
+/**
+* Specify the watermark event generation interval. For tuple based timestamps, watermark events
+* are used to track the progress of time
+*
+* @param interval the interval at which watermark events are generated
+*/
+public BaseWindowedBolt withWatermarkInterval(Duration interval)
+```
+
+
+When a watermark is received, all windows up to that timestamp will be evaluated.
+
+For example, consider tuple timestamp based processing with the following window parameters:
+
+`Window length = 20s, sliding interval = 10s, watermark emit frequency = 1s, max lag = 5s`
+
+```
+|-----|-----|-----|-----|-----|-----|-----|
+0     10    20    30    40    50    60    70
+```
+
+Current ts = `09:00:00`
+
+Tuples `e1(6:00:03), e2(6:00:05), e3(6:00:07), e4(6:00:18), e5(6:00:26), e6(6:00:36)` are received between `9:00:00` and `9:00:01`
+
+At time t = `09:00:01`, watermark w1 = `6:00:31` is emitted since no tuples earlier than `6:00:31` can arrive.
+
+Three windows will be evaluated. The first window end ts (06:00:10) is computed by taking the earliest event timestamp (06:00:03) and computing the ceiling based on the sliding interval (10s).
+
+1. `5:59:50 - 06:00:10` with tuples e1, e2, e3
+2. `6:00:00 - 06:00:20` with tuples e1, e2, e3, e4
+3. `6:00:10 - 06:00:30` with tuples e4, e5
+
+e6 is not evaluated since the watermark timestamp `6:00:31` is older than the tuple ts `6:00:36`.
+
+Tuples `e7(8:00:25), e8(8:00:26), e9(8:00:27), e10(8:00:39)` are received between `9:00:01` and `9:00:02`
+
+At time t = `09:00:02` another watermark w2 = `08:00:34` is emitted since no tuples earlier than `8:00:34` can arrive now.
+
+Three windows will be evaluated,
+
+1. `6:00:20 - 06:00:40` with tuples e5, e6 (from earlier batch)
+2. `6:00:30 - 06:00:50` with tuple e6 (from earlier batch)
+3. `8:00:10 - 08:00:30` with tuples e7, e8, e9
+
+e10 is not evaluated since the tuple ts `8:00:39` is beyond the watermark time `8:00:34`.
+
+The window calculation considers the time gaps and computes the windows based on the tuple timestamp.
+
+## Guarantees
+The windowing functionality in storm core currently provides an at-least-once guarantee. The values emitted from the bolt's `execute(TupleWindow inputWindow)` method are automatically anchored to all the tuples in the inputWindow. The downstream bolts are expected to ack the received tuple (i.e. the tuple emitted from the windowed bolt) to complete the tuple tree. If not, the tuples will be replayed and the windowing computation will be re-evaluated.
+
+The tuples in the window are automatically acked when they expire, i.e. when they fall out of the window after `windowLength + slidingInterval`. Note that the configuration `topology.message.timeout.secs` should be sufficiently more than `windowLength + slidingInterval` for time based windows; otherwise the tuples will timeout and get replayed, which can result in duplicate evaluations. For count based windows, the configuration should be adjusted such that `windowLength + slidingInterval` tuples can be received within the timeout period.
+
+## Example topology
+An example topology `SlidingWindowTopology` shows how to use the APIs to compute a sliding window sum and a tumbling window average.
+
+## Stateful windowing
+The default windowing implementation in storm stores the tuples in memory until they are processed and expired from the window. This limits the use cases to windows that fit entirely in memory. Also, the source tuples cannot be acked until the window expires, requiring large message timeouts (topology.message.timeout.secs should be larger than the window length + sliding interval). This also puts extra load on the system due to the complex acking and anchoring requirements.
+
+To address the above limitations and to support larger window sizes, storm provides stateful windowing support via `IStatefulWindowedBolt`. User bolts should typically extend `BaseStatefulWindowedBolt` for the windowing operations, with the framework automatically managing the state of the window in the background.
+
+If the sources provide a monotonically increasing identifier as a part of the message, the framework can use this to periodically checkpoint the last expired and evaluated message ids, to avoid duplicate window evaluations in case of failures or restarts. During recovery, the tuples with message ids lower than the last expired id are discarded and tuples with message ids between the last expired and last evaluated message ids are fed into the system without activating any previously activated windows. The tuples beyond the last evaluated message ids are processed as usual.
+This can be enabled by setting the `messageIdField` as shown below,
+
+```java
+topologyBuilder.setBolt("mybolt",
+                   new MyStatefulWindowedBolt()
+                   .withWindow(...) // windowing configurations
+                   .withMessageIdField("msgid"), // a monotonically increasing 'long' field in the tuple
+                   parallelism)
+               .shuffleGrouping("spout");
+```
+
+However, this option is feasible only if the sources can provide a monotonically increasing identifier in the tuple and the same is maintained while re-emitting the messages in case of failures. With this option the tuples are still buffered in memory until processed and expired from the window.
+
+For more details take a look at the sample topology in storm-starter [StatefulWindowingTopology](../examples/storm-starter/src/jvm/org/apache/storm/starter/StatefulWindowingTopology.java) which will help you get started.
+
+### Window checkpointing
+
+With window checkpointing, the monotonically increasing id is no longer required since the framework transparently saves the state of the window periodically into the configured state backend. The state that is saved includes the tuples in the window, any system state that is required to recover the state of processing, and also the user state.
+
+```java
+topologyBuilder.setBolt("mybolt",
+                   new MyStatefulPersistentWindowedBolt()
+                   .withWindow(...) // windowing configurations
+                   .withPersistence() // persist the window state
+                   .withMaxEventsInMemory(25000), // max number of events to be cached in memory
+                   parallelism)
+               .shuffleGrouping("spout");
+
+```
+
+The `withPersistence()` call instructs the framework to transparently save the tuples in the window, along with any associated system and user state, to the state backend. `withMaxEventsInMemory()` is an optional configuration that specifies the maximum number of tuples that may be kept in memory. The tuples are transparently loaded from the state backend as required, and the ones that are most likely to be used again are retained in memory.
+
+The state backend can be configured by setting the topology state provider config,
+
+```java
+// use redis for state persistence
+conf.put(Config.TOPOLOGY_STATE_PROVIDER, "org.apache.storm.redis.state.RedisKeyValueStateProvider");
+
+```
+Currently Storm supports Redis and HBase as state backends and uses the underlying state-checkpointing framework for saving the window state. For more details on state checkpointing see [State-checkpointing](State-checkpointing.html).
+
+Here is an example of a persistent windowed bolt that uses window checkpointing to save its state. The `initState` method is invoked with the last saved state (user state) at initialization time. The execute method is invoked based on the configured windowing parameters, and the tuples in the active window can be accessed via an `iterator` as shown below.
+
+```java
+public class MyStatefulPersistentWindowedBolt extends BaseStatefulWindowedBolt {
+    private KeyValueState state;
+
+    @Override
+    public void initState(KeyValueState state) {
+        this.state = state;
+        // ...
+        // restore the state from the last saved state.
+        // ...
+    }
+
+    @Override
+    public void execute(TupleWindow window) {
+        // iterate over tuples in the current window
+        Iterator it = window.getIter();
+        while (it.hasNext()) {
+            // compute some result based on the tuples in window
+        }
+
+        // possibly update any state to be maintained across windows
+        state.put(STATE_KEY, updatedValue);
+
+        // emit the results downstream
+        collector.emit(new Values(result));
+    }
+}
+```
+
+**Note:** In case of persistent windowed bolts, use `TupleWindow.getIter` to retrieve an iterator over the events in the window. If the number of tuples in the window is huge, invoking `TupleWindow.get` would try to load all the tuples into memory and may throw an OOM exception.
+
+**Note:** In case of persistent windowed bolts, `TupleWindow.getNew` and `TupleWindow.getExpired` are currently not supported and will throw an `UnsupportedOperationException`.
+
+For more details take a look at the sample topology in storm-starter [PersistentWindowingTopology](../examples/storm-starter/src/jvm/org/apache/storm/starter/PersistentWindowingTopology.java) which will help you get started.
diff --git a/docs/cgroups_in_storm.md b/docs/cgroups_in_storm.md
new file mode 100644
index 00000000000..f44bed973e0
--- /dev/null
+++ b/docs/cgroups_in_storm.md
@@ -0,0 +1,181 @@
+---
+title: CGroup Enforcement
+layout: documentation
+documentation: true
+---
+
+# CGroups in Storm
+
+CGroups are used by Storm to limit the resource usage of workers to guarantee fairness and QOS.
+
+**Please note: CGroups are currently supported only on Linux platforms (kernel version 2.6.24 and above)**
+
+## Setup
+
+To use CGroups, make sure cgroups are installed and configured correctly. For more information about setting up and configuring, please visit:
+
+https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Resource_Management_Guide/ch-Using_Control_Groups.html
+
+A sample/default cgconfig.conf file is supplied in the /conf directory. The contents are as follows:
+
+```
+mount {
+    cpuset  = /cgroup/cpuset;
+    cpu     = /cgroup/storm_resources;
+    cpuacct = /cgroup/storm_resources;
+    memory  = /cgroup/storm_resources;
+    devices = /cgroup/devices;
+    freezer = /cgroup/freezer;
+    net_cls = /cgroup/net_cls;
+    blkio   = /cgroup/blkio;
+}
+
+group storm {
+    perm {
+        task {
+            uid = 500;
+            gid = 500;
+        }
+        admin {
+            uid = 500;
+            gid = 500;
+        }
+    }
+    cpu {
+    }
+    memory {
+    }
+    cpuacct {
+    }
+}
+```
+
+For a more detailed explanation of the format and configs for the cgconfig.conf file, please visit:
+
+https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Resource_Management_Guide/ch-Using_Control_Groups.html#The_cgconfig.conf_File
+
+To let Storm manage the cgroups for individual workers, you need to make sure that the resources you want to control are mounted under the same directory as in the example above. If they are not in the same directory, the supervisor will throw an exception.
+
+The perm section needs to be configured so that the user the supervisor is running as can modify the group.
+
+If "run as user" is enabled so that the supervisor spawns other processes as the user that launched the topology, make sure that the permissions are such that individual users have read access but not write access.
+
+# Settings Related To CGroups in Storm
+
+| Setting | Function |
+|---------|----------|
+| storm.resource.isolation.plugin.enable | This config is used to set whether a resource isolation plugin will be used. Defaults to "false". When this config is set to false, unit tests related to cgroups will be skipped. |
+| storm.resource.isolation.plugin | Select a resource isolation plugin to use when `storm.resource.isolation.plugin.enable` is set to true. Defaults to "org.apache.storm.container.cgroup.CgroupManager". |
+| storm.cgroup.hierarchy.dir | The path to the cgroup hierarchy that storm will use. Defaults to "/cgroup/storm_resources". |
+| storm.cgroup.resources | A list of subsystems that will be regulated by CGroups. Defaults to cpu and memory. Currently only cpu and memory are supported. |
+| storm.supervisor.cgroup.rootdir | The root cgroup used by the supervisor. The path to the cgroup will be \<storm.cgroup.hierarchy.dir\>/\<storm.supervisor.cgroup.rootdir\>. Defaults to "storm". |
+| storm.cgroup.cgexec.cmd | Absolute path to the cgexec command used to launch workers within a cgroup. Defaults to "/bin/cgexec". |
+| storm.worker.cgroup.memory.mb.limit | The memory limit in MB for each worker. This can be set on a per supervisor node basis. This config is used to set the cgroup config memory.limit_in_bytes. For more details about memory.limit_in_bytes, please visit: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Resource_Management_Guide/sec-memory.html. Please note, if you are using the Resource Aware Scheduler, please do NOT set this config, as it will override the values calculated by the Resource Aware Scheduler. |
+| storm.worker.cgroup.cpu.limit | The cpu share for each worker. This can be set on a per supervisor node basis. This config is used to set the cgroup config cpu.shares. For more details about cpu.shares, please visit: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Resource_Management_Guide/sec-cpu.html. Please note, if you are using the Resource Aware Scheduler, please do NOT set this config, as it will override the values calculated by the Resource Aware Scheduler. |
+
+Since limiting CPU usage via cpu.shares only limits the proportional CPU usage of a process, to limit the amount of CPU usage of all the worker processes on a supervisor node, set the config `supervisor.cpu.capacity`. Each increment represents 1% of a core; thus, if a user sets `supervisor.cpu.capacity: 200`, the user is indicating the use of 2 cores.
+
+## Integration with Resource Aware Scheduler
+
+CGroups can be used in conjunction with the Resource Aware Scheduler. CGroups will then enforce the resource usage of workers as allocated by the Resource Aware Scheduler. To use cgroups with the Resource Aware Scheduler, simply enable cgroups and be sure NOT to set the `storm.worker.cgroup.memory.mb.limit` and `storm.worker.cgroup.cpu.limit` configs.
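+
+With the Resource Aware Scheduler, resource needs are instead declared per component in the topology code, and the cgroup limits are derived from what the scheduler allocates. A sketch of such declarations (the numbers are illustrative):
+
+```java
+topologyBuilder.setBolt("green-bolt", new GreenBolt(), 2)
+    .setCPULoad(50.0)     // request 50% of one core per executor
+    .setMemoryLoad(512.0) // request 512 MB of on-heap memory per executor
+    .shuffleGrouping("blue-spout");
+```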
+
+# CGroup Metrics
+
+CGroups not only can limit the amount of resources a worker has access to, but can also help monitor the resource consumption of a worker. There are several metrics enabled by default that will check if the worker is a part of a CGroup and report corresponding metrics.
+
+## CGroupCPU
+
+org.apache.storm.metrics2.cgroup.CGroupCPU reports metrics similar to org.apache.storm.metrics.sigar.CPUMetric, but for everything within the CGroup. It reports both user and system CPU usage in ms.
+
+```
+   "CGroupCPU.user-ms": number
+   "CGroupCPU.sys-ms": number
+```
+
+CGroup reports these as CLK_TCK counts, not milliseconds, so the accuracy is determined by what CLK_TCK is set to. On most systems it is 100 ticks per second, so the accuracy is at best 10 ms.
+
+To make these metrics work, cpuacct must be mounted.
+
+## CGroupCpuGuarantee
+
+org.apache.storm.metrics2.cgroup.CGroupCpuGuarantee reports back an approximate number of ms of CPU time that this worker is guaranteed to get. This is calculated from the resources requested by the tasks in that given worker.
+
+## CGroupCpuGuaranteeByCfsQuota
+
+org.apache.storm.metrics2.cgroup.CGroupCpuGuaranteeByCfsQuota reports the percentage of the cpu guaranteed for the worker from cpu.cfs_period_us and cpu.cfs_quota_us.
+
+## CGroupCpuStat
+
+org.apache.storm.metrics2.cgroup.CGroupCpuStat reports the bandwidth statistics of the CGroup. It includes
+```
+   "CGroupCpuStat.nr.period-count": number
+   "CGroupCpuStat.nr.throttled-count": number
+   "CGroupCpuStat.nr.throttled-percentage": number
+   "CGroupCpuStat.throttled.time-ms": number
+```
+
+It is based on the following `cpu.stat`:
+ - `nr_periods`: Number of enforcement intervals that have elapsed.
+ - `nr_throttled`: Number of times the group has been throttled/limited.
+ - `throttled_time`: The total time duration (in nanoseconds) for which entities of the group have been throttled.
+
+And the reported metrics are
+ - `nr.period-count`: the difference of `nr_periods` between two consecutive reporting cycles
+ - `nr.throttled-count`: the difference of `nr_throttled` between two consecutive reporting cycles
+ - `nr.throttled-percentage`: (`nr.throttled-count` / `nr.period-count`)
+ - `throttled.time-ms`: the difference of `throttled_time` in milliseconds between two consecutive reporting cycles
+
+Note: when `org.apache.storm.container.docker.DockerManager` or `org.apache.storm.container.oci.RuncLibContainerManager` is used as `storm.resource.isolation.plugin`, use `org.apache.storm.metric.cgroup.CGroupCpuGuaranteeByCfsQuota` instead.
+
+## CGroupMemory
+
+org.apache.storm.metrics2.cgroup.CGroupMemoryUsage reports the current memory usage of all processes in the cgroup in bytes.
+
+## CGroupMemoryLimit
+
+org.apache.storm.metrics2.cgroup.CGroupMemoryLimit reports the current limit in bytes for all of the processes in the cgroup. If running with CGroups enabled in storm, this is the on-heap request + the off-heap request for all tasks within the worker + any extra slop space given to workers.
+
+## Usage/Debugging CGroups in your topology
+
+These metrics can be very helpful in debugging what has happened or is happening to your code when it is running under a CGroup.
+
+### CPU
+
+CPU guarantees under Storm are soft. This means that a worker can easily go over its guarantee if there is free CPU available. To detect that your worker is using more CPU than it requested, you can sum up the values in CGroupCPU and compare them to CGroupCpuGuarantee.
+If CGroupCPU is consistently higher than or equal to CGroupCpuGuarantee, you probably want to look at requesting more CPU, as your worker may be starved for CPU if more load is placed on the cluster. Being equal to CGroupCpuGuarantee means your worker may already be throttled. If the used CPU is much smaller than CGroupCpuGuarantee, then you are probably wasting resources and may want to reduce your CPU ask.
+
+If you do have high CPU, you probably also want to check out the GC metrics and/or the GC log for your worker. Memory pressure on the heap can result in increased CPU as garbage collection happens.
+
+### Memory
+
+Memory debugging of java under a cgroup can be difficult for multiple reasons.
+
+1. JVM memory management is complex
+2. As of the writing of this documentation only experimental support for cgroups is in a few JVMs
+3. JNI and other processes can use up memory within the cgroup that the JVM is not always aware of.
+4. Memory pressure within the heap can result in increased CPU load instead of increased memory allocation.
+
+There are several metrics that storm provides by default that can help you understand what is happening within your worker.
+
+If CGroupMemory gets close to CGroupMemoryLimit, then you know that bad things are likely to start happening soon with this worker. Memory is not a soft guarantee like CPU. If you go over it, the OOM killer on Linux will start killing processes within your worker. Please pay attention to these metrics. If you are running a version of java that is cgroup aware, then going over the limit typically means that you will need to increase your off-heap request. If you are not, it could be that you need more off-heap memory, or it could be that java has allocated more memory than it should have as part of the garbage collection process. Figuring out which is typically best done with trial and error (sorry).
+
+Storm also reports the JVM's on-heap and off-heap usage through the "memory/heap" and "memory/nonHeap" metrics respectively. These can be used to give you a hint on which to increase. Looking at the "usedBytes" field under each can help you understand how much memory the JVM is currently using. Although, as noted, the off-heap portion is not always accurate, and when the heap grows it can result in unrecorded off-heap memory that will cause the cgroup to kill processes.
+
+The names of the GC metrics vary based off of the garbage collector you use, but they all start with "GC/". If you sum up all of the "GC/*.timeMs" metrics for a given worker/window pair, you should be able to see how much of the CPU guarantee went to GC. By default java allows 98% of cpu time to go towards GC before it throws an OutOfMemoryError. This is far from ideal for a near real time streaming system, so pay attention to this ratio.
+
+If the ratio is at a fairly steady state and your memory usage is not even close to the limit, you might want to look at reducing your memory request. This too can be complicated to figure out.
+
+## Future Work
+
+There is a lot of work on adding in elasticity to storm. Eventually we hope to be able to do all of the above analysis for you and grow/shrink your topology on demand.
diff --git a/docs/distcache-blobstore.md b/docs/distcache-blobstore.md
new file mode 100644
index 00000000000..a5897c4cd2a
--- /dev/null
+++ b/docs/distcache-blobstore.md
@@ -0,0 +1,757 @@
+---
+title: Storm Distributed Cache API
+layout: documentation
+documentation: true
+---
+# Storm Distributed Cache API
+
+The distributed cache feature in storm is used to efficiently distribute files (or blobs, which is the equivalent terminology for a file in the distributed cache and is used interchangeably in this document) that are large and can change during the lifetime of a topology, such as geo-location data, dictionaries, etc. Typical use cases include phrase recognition, entity extraction, document classification, URL re-writing, location/address detection and so forth. Such files may be several KB to several GB in size. For small datasets that don't need dynamic updates, including them in the topology jar could be fine. But for large files, the startup times could become very large. In these cases, the distributed cache feature can provide fast topology startup, especially if the files were previously downloaded for the same submitter and are still in the cache. This is useful with frequent deployments, sometimes a few times a day with updated jars, because large cached blobs that do not change frequently remain available in the distributed cache without being re-downloaded.
+
+When a topology starts, the user specifies the set of files the topology needs. Once a topology is running, the user can at any time request that any file in the distributed cache be updated with a newer version. The updating of blobs happens in an eventual consistency model. If the topology needs to know what version of a file it has access to, it is the responsibility of the user to find this information out. The files are stored in a cache with a Least-Recently-Used (LRU) eviction policy, where the supervisor decides which cached files are no longer needed and can delete them to free disk space. The blobs can be compressed, and the user can request that the blobs be uncompressed before they are accessed.
+
+## Motivation for Distributed Cache
+* Allows sharing blobs among topologies.
+* Allows updating the blobs from the command line.
+
+## Distributed Cache Implementations
+The current BlobStore interface has the following two implementations
+* LocalFsBlobStore
+* HdfsBlobStore
+
+Appendix A contains the interface for blobstore implementation.
+
+## LocalFsBlobStore
+![LocalFsBlobStore](images/local_blobstore.png)
+
+The local file system implementation of the blobstore is depicted in the timeline diagram above.
+
+There are several stages from blob creation to blob download and the corresponding execution of a topology. The main stages are as follows.
+
+### Blob Creation Command
+Blobs in the blobstore can be created through the command line using the following command.
+
+```
+storm blobstore create --file README.txt --acl o::rwa --replication-factor 4 key1
+```
+
+The above command creates a blob with a key name “key1” corresponding to the file README.txt. All users are given read, write and admin access, with a replication factor of 4.
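+
+The same blob can also be created programmatically through the client-side blobstore API. The following is an illustrative sketch (it assumes the `ClientBlobStore` interface described later in this document and a storm configuration reachable from the client):
+
+```java
+Map<String, Object> conf = Utils.readStormConfig();
+ClientBlobStore blobStore = Utils.getClientBlobStore(conf);
+
+// o::rwa grants read, write and admin access to all users, as in the command above.
+List<AccessControl> acls = new LinkedList<>();
+acls.add(BlobStoreAclHandler.parseAccessControl("o::rwa"));
+SettableBlobMeta meta = new SettableBlobMeta(acls);
+
+// The output stream is atomic: the blob becomes visible only once it is closed.
+try (AtomicOutputStream out = blobStore.createBlob("key1", meta)) {
+    out.write(Files.readAllBytes(Paths.get("README.txt")));
+}
+```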
The configuration holds two keys, “key1” and “key2”; the
+key “key1” has a local file name mapping of “blob_file” and is not compressed.
+Workers will restart when the key1 file is updated on the supervisors.
+
+```
+storm jar /home/y/lib/storm-starter/current/storm-starter-jar-with-dependencies.jar
+org.apache.storm.starter.clj.word_count test_topo -c topology.blobstore.map='{"key1":{"localname":"blob_file", "uncompress":false, "workerRestart":true},"key2":{}}'
+```
+
+### Blob Creation Process
+The creation of the blob takes place through the “ClientBlobStore” interface. Appendix B contains the “ClientBlobStore” interface.
+The concrete implementation of this interface is the “NimbusBlobStore”. In the case of the local file system, the client makes a
+call to the nimbus to create the blobs within the local file system, and the nimbus uses the local file system implementation to create these blobs.
+When a user submits a topology, the jar, configuration and code files are uploaded as blobs with the help of the blobstore.
+Also, all the other blobs specified by the topology are mapped to it with the help of the topology.blobstore.map configuration.
+
+### Blob Download by the Supervisor
+Finally, the blobs corresponding to a topology are downloaded by the supervisor once it receives the assignments from the nimbus, through
+the same “NimbusBlobStore” thrift client that uploaded the blobs. The supervisor downloads the code, jar and conf blobs by calling the
+“NimbusBlobStore” client directly, while the blobs specified in the topology.blobstore.map are downloaded and mapped locally with the help
+of the Localizer. The Localizer talks to the “NimbusBlobStore” thrift client to download the blobs and adds the blob compression and local
+blob name mapping logic to suit the implementation of a topology. Once all the blobs have been downloaded, the workers are launched to run
+the topologies.
+
+## HdfsBlobStore
+![HdfsBlobStore](images/hdfs_blobstore.png)
+
+The HdfsBlobStore has a similar implementation and blob creation and download procedure, differing only in how replication
+is handled in the two blobstore implementations. Replication in the HDFS blobstore is straightforward, as HDFS is equipped to handle
+replication and requires no state to be stored inside zookeeper. On the other hand, the local file system blobstore requires the state to be
+stored in zookeeper in order for it to work with nimbus HA. Nimbus HA allows the local file system blobstore to implement the replication
+feature seamlessly by storing state about the running topologies in zookeeper and syncing the blobs across the various nimbuses. On the
+supervisor’s end, the supervisor and localizer talk to the HdfsBlobStore through the “HdfsClientBlobStore” implementation.
+
+## Additional Features and Documentation
+```
+storm jar /home/y/lib/storm-starter/current/storm-starter-jar-with-dependencies.jar org.apache.storm.starter.clj.word_count test_topo
+-c topology.blobstore.map='{"key1":{"localname":"blob_file", "uncompress":false},"key2":{}}'
+```
+
+### Compression
+The blobstore allows the user to set the “uncompress” configuration to true or false. This configuration can be specified
+in the topology.blobstore.map mentioned in the above command. This allows the user to upload a compressed file like a tarball/zip.
+In the local file system blobstore, the compressed blobs are stored on the nimbus node. The localizer code takes the responsibility to
+uncompress the blob and store it on the supervisor node.
Symbolic links to the blobs on the supervisor node are created within the worker
+before execution starts.
+
+### Local File Name Mapping
+Apart from compression, the blobstore helps give the blob a name that can be used by the workers. The localizer takes
+the responsibility of mapping the blob to a local name on the supervisor node.
+
+## Additional Blobstore Implementation Details
+The blobstore uses a hashing function to place the blobs based on the key. The blobs are generally stored inside the directory specified by
+the blobstore.dir configuration. By default, it is set to “storm.local.dir/blobs” for the local file system, and a similar path on
+the HDFS file system.
+
+Once a file is submitted, the blobstore reads the configs and creates metadata for the blob with all the access control details. The metadata
+is generally used for authorization while accessing the blobs. The blob key and version contribute to the hash code and thereby the directory
+under “storm.local.dir/blobs/data” where the data is placed. The blobs are generally placed in a directory named with a positive number, e.g. 193, 822, etc.
+
+Once the topology is launched and the relevant blobs have been created, the supervisor first downloads the blobs related to storm.conf, storm.ser
+and storm.code, and then separately downloads all the blobs uploaded via the command line, using the localizer to uncompress and map them to the local name
+specified in the topology.blobstore.map configuration. The supervisor periodically updates blobs by checking for a change of version.
+This allows the blobs to be updated on the fly, which makes it a very useful feature.
+
+For a local file system, the distributed cache on the supervisor node is set to 10240 MB as a soft limit, and the cleanup code attempts
+to clean anything over the soft limit every 600 seconds based on the LRU policy.
+
+The HDFS blobstore implementation handles load better by removing the burden on the nimbus to store the blobs, which avoids it becoming a bottleneck. Moreover, it provides seamless replication of blobs. On the other hand, the local file system blobstore is not very efficient in
+replicating the blobs and is limited by the number of nimbuses. Moreover, the supervisor talks to the HDFS blobstore directly without the
+involvement of the nimbus, which reduces the load and dependency on nimbus.
+
+## Highly Available Nimbus
+### Problem Statement:
+Currently the storm master, aka nimbus, is a process that runs on a single machine under supervision. In most cases, a
+nimbus failure is transient and it is restarted by the process that does the supervision. However, sometimes when disks fail or network
+partitions occur, nimbus goes down. Under these circumstances, the topologies run normally, but no new topologies can be
+submitted, no existing topologies can be killed/deactivated/activated, and if a supervisor node fails then the
+reassignments are not performed, resulting in performance degradation or topology failures. With this project we intend
+to resolve this problem by running nimbus in a primary-backup mode to guarantee that even if a nimbus server fails, one
+of the backups will take over.
+
+### Requirements for Highly Available Nimbus:
+* Increase overall availability of nimbus.
+* Allow nimbus hosts to leave and join the cluster at will. A newly joined host should automatically catch up and join
+the list of potential leaders.
+* No topology resubmissions required in case of nimbus failovers.
+* No active topology should ever be lost.
+
+#### Leader Election:
+The nimbus server will use the following interface:
+
+```java
+public interface ILeaderElector {
+    /**
+     * Queue up for the leadership lock. The call returns immediately and the caller
+     * must check isLeader() to perform any leadership action.
+     */
+    void addToLeaderLockQueue();
+
+    /**
+     * Removes the caller from the leader lock queue. If the caller is the leader,
+     * this also releases the lock.
+     */
+    void removeFromLeaderLockQueue();
+
+    /**
+     * @return true if the caller currently has the leader lock.
+     */
+    boolean isLeader();
+
+    /**
+     * @return the current leader's address; throws an exception if no one holds the lock.
+     */
+    InetSocketAddress getLeaderAddress();
+
+    /**
+     * @return list of current nimbus addresses, including the leader.
+     */
+    List<InetSocketAddress> getAllNimbusAddresses();
+}
+```
+Once a nimbus comes up, it calls the addToLeaderLockQueue() function. The leader election code selects a leader from the queue.
+If the topology code, jar or config blobs are missing, it will download the blobs from any other nimbus which is up and running.
+
+The first implementation will be Zookeeper based. If the zookeeper connection is lost/reset, resulting in the loss of the lock
+or the spot in the queue, the implementation will take care of updating the state such that isLeader() reflects the
+current status. Leader-like actions must finish in less than minimumOf(connectionTimeout, SessionTimeout) to ensure
+the lock was held by nimbus for the entire duration of the action (not sure if we want to just state this expectation
+and ensure that zk configurations are set high enough, which will result in higher failover time, or we actually want to
+create some sort of rollback mechanism for all actions; the second option needs a lot of code). If a nimbus that is not
+the leader receives a request that only a leader can perform, it will throw a RuntimeException.
+
+### Nimbus state store:
+
+To achieve failover from primary to backup servers, nimbus state/data needs to be replicated across all nimbus hosts or
+stored in distributed storage. Replicating the data correctly involves state management and consistency checks,
+and it is hard to test for correctness. However, many storm users do not want to take an extra dependency on another replicated
+storage system like HDFS and still need high availability. The blobstore implementation, along with the state storage, helps
+to overcome the failover scenarios in case a leader nimbus goes down.
+
+To support replication we will allow the user to define a code replication factor, which reflects the number of nimbus
+hosts to which the code must be replicated before starting the topology. With replication comes the issue of consistency.
+The topology is launched once the code, jar and conf blob files are replicated based on the "topology.min.replication" config.
+Maintaining state for failover scenarios is important for the local file system. The current implementation makes sure one of the
+available nimbuses is elected as leader in the case of a failure. If topology-specific blobs are missing, the leader nimbus
+tries to download them as and when they are needed. With this architecture, we do not have to download all the blobs
+required for a topology before a nimbus can accept leadership. This helps in case the blobs are very large and avoids causing any
+inadvertent delays in electing a leader.
+
+The state for every blob is relevant only for the local blobstore implementation. For the HDFS blobstore, replication
+is taken care of by HDFS.
To handle failover scenarios for a local blobstore, we need to store the state of the leader and
+non-leader nimbuses in zookeeper.
+
+The state is stored under /storm/blobstore/key/nimbusHostPort:SequenceNumber so that the blobstore can make nimbus highly available.
+This state is used in the local file system blobstore to support replication. The HDFS blobstore does not have to store this state inside
+zookeeper.
+
+* NimbusHostPort: This piece of information generally contains the parsed string holding the hostname and port of the nimbus.
+  It uses the same class, “NimbusHostPortInfo”, used earlier by the code-distributor interface to store the state and parse the data.
+
+* SequenceNumber: This is the blob sequence number information. The SequenceNumber information is implemented by a KeySequenceNumber class.
+The sequence numbers are generated for every key. For every update, the sequence numbers are assigned based on a global sequence number
+stored under /storm/blobstoremaxsequencenumber/key. For more details about how the numbers are generated, you can look at the javadocs for KeySequenceNumber.
+
+![Nimbus High Availability - BlobStore](images/nimbus_ha_blobstore.png)
+
+The sequence diagram shows how the blobstore works and how the state storage inside zookeeper makes nimbus highly available.
+Currently, the thread that syncs the blobs on a non-leader is within the nimbus. In the future, it would be nice to move this thread
+into the blobstore, so the blobstore coordinates the state change and blob download as per the sequence diagram.
+
+## Thrift and REST API
+In order to avoid workers/supervisors/ui talking to zookeeper to get the master nimbus address, we are going to modify the
+`getClusterInfo` API so it can also return nimbus information. getClusterInfo currently returns a `ClusterSummary` instance,
+which has a list of `supervisorSummary` and a list of `topologySummary` instances. We will add a list of `NimbusSummary`
+to the `ClusterSummary`. See the structures below:
+
+```
+struct ClusterSummary {
+  1: required list<SupervisorSummary> supervisors;
+  3: required list<TopologySummary> topologies;
+  4: required list<NimbusSummary> nimbuses;
+}
+
+struct NimbusSummary {
+  1: required string host;
+  2: required i32 port;
+  3: required i32 uptime_secs;
+  4: required bool isLeader;
+  5: required string version;
+}
+```
+
+This will be used by StormSubmitter, Nimbus clients, supervisors and the ui to discover the current leader and participating
+nimbus hosts. Any nimbus host will be able to respond to these requests. The nimbus hosts can read this information once
+from zookeeper, cache it, and keep updating the cache when the watchers are fired to indicate any changes, which should
+be rare in the general case.
+
+Note: All nimbus hosts have watchers on zookeeper so they are notified immediately as soon as a new blob is available for download; the callback may or may not download
+the code. Therefore, a background thread is triggered to download the respective blobs to run the topologies. Replication is achieved when the blobs are downloaded
+onto non-leader nimbuses. So you should expect your topology submission time to be somewhere between 0 and (2 * nimbus.code.sync.freq.secs) for any
+topology.min.replication.count > 1.
+
+## Configuration
+
+```
+blobstore.dir: The directory where all blobs are stored. For the local file system it represents the directory on the nimbus
+node, and for the HDFS file system it represents the hdfs file system path.
+
+supervisor.blobstore.class: This configuration sets the client the supervisor uses to talk to the blobstore.
+For a local file system blobstore it is set to “org.apache.storm.blobstore.NimbusBlobStore”, and for the HDFS blobstore it is set
+to “org.apache.storm.blobstore.HdfsClientBlobStore”.
+
+supervisor.blobstore.download.thread.count: This configuration sets the number of threads the supervisor spawns to download
+blobs concurrently. The default is set to 5.
+
+supervisor.blobstore.download.max_retries: This configuration sets how many times the supervisor retries a blob download.
+By default it is set to 3.
+
+supervisor.localizer.cache.target.size.mb: The distributed cache target size in MB. This is a soft limit on the size
+of the distributed cache contents. By default it is set to 10240 MB.
+
+supervisor.localizer.cleanup.interval.ms: The distributed cache cleanup interval. Controls how often it scans to attempt to
+cleanup anything over the cache target size. By default it is set to 300000 milliseconds.
+
+supervisor.localizer.update.blob.interval.secs: The distributed cache interval for checking for blobs to update. By
+default it is set to 30 seconds.
+
+nimbus.blobstore.class: Sets the blobstore implementation nimbus uses. By default it is set to "org.apache.storm.blobstore.LocalFsBlobStore".
+
+nimbus.blobstore.expiration.secs: During operations with the blobstore, via master, how long a connection is idle before nimbus
+considers it dead and drops the session and any associated connections. The default is set to 600.
+
+storm.blobstore.inputstream.buffer.size.bytes: The buffer size used for blobstore uploads. It is set to 65536 bytes.
+
+client.blobstore.class: The blobstore implementation the storm client uses. The current implementation uses the default
+config "org.apache.storm.blobstore.NimbusBlobStore".
+
+blobstore.replication.factor: Sets the replication for each blob within the blobstore. The “topology.min.replication.count”
+ensures the minimum replication of the topology-specific blobs before launching the topology. You might want to ensure
+topology.min.replication.count <= blobstore.replication.factor. The default is set to 3.
+
+topology.min.replication.count: Minimum number of nimbus hosts where the code must be replicated before the leader nimbus
+can mark the topology as active and create assignments. Default is 1.
+
+topology.max.replication.wait.time.sec: Maximum wait time for the nimbus host replication to achieve topology.min.replication.count.
+Once this time has elapsed, nimbus will go ahead and perform topology activation tasks even if the required topology.min.replication.count is not achieved.
+The default is 60 seconds; a value of -1 indicates to wait forever.
+
+nimbus.code.sync.freq.secs: Frequency at which the background thread on nimbus syncs code for locally missing blobs. Default is 2 minutes.
+```
+
+Additionally, if you want to access a secure HDFS blobstore, you also need to set the following configs.
+
+```
+storm.hdfs.login.keytab or blobstore.hdfs.keytab (deprecated)
+storm.hdfs.login.principal or blobstore.hdfs.principal (deprecated)
+```
+
+For example,
+```
+storm.hdfs.login.keytab: /etc/keytab
+storm.hdfs.login.principal: primary/instance@REALM
+```
+
+
+## Using the Distributed Cache API, Command Line Interface (CLI)
+
+### Creating blobs
+
+To use the distributed cache feature, the user first has to "introduce" files
+that need to be cached and bind them to key strings. To achieve this, the user
+uses the "blobstore create" command of the storm executable, as follows:
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+storm blobstore create [-f|--file FILE] [-a|--acl ACL1,ACL2,...] [--replication-factor NUMBER] [keyname]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The contents come from a FILE, if provided via the -f or --file option, otherwise
+from STDIN.
+The ACLs, which can be given as a comma-separated list of many ACLs, have the
+following format:
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+> [u|o]:[username]:[r-|w-|a-|_]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+where:
+
+* u = user
+* o = other
+* username = user for this particular ACL
+* r = read access
+* w = write access
+* a = admin access
+* _ = ignored
+
+The replication factor can be set to a value greater than 1 using --replication-factor.
+
+Note: The replication is currently configurable for an HDFS blobstore, but for a
+local blobstore the replication always stays at 1. For an HDFS blobstore
+the default replication is set to 3.
+
+###### Example:
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+storm blobstore create --file README.txt --acl o::rwa --replication-factor 4 key1
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In the above example, the *README.txt* file is added to the distributed cache.
+It can be accessed using the key string "*key1*" by any topology that needs
+it. The file is set to have read/write/admin access for others (a.k.a. the
+world), and the replication is set to 4.
+
+###### Example:
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The above example creates a mytopo:data.tgz key using the data stored in
+data.tgz. User alice would have full access, bob would have read/write access
+and everyone else would have read access.
+
+### Making dist. cache files accessible to topologies
+
+Once a blob is created, we can use it for topologies. This is generally achieved
+by including the key string among the configurations of a topology, with the
+following format. A shortcut is to add the configuration item on the command
+line when starting a topology by using the **-c** option:
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-c topology.blobstore.map='{"[KEY]":{"localname":"[VALUE]", "uncompress":[true|false]}}'
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Note: Please take care of the quotes.
+
+The cache file would then be accessible to the topology as a local file with the
+name [VALUE].
+The localname parameter is optional; if omitted, the local cached file will have
+the same name as [KEY].
+The uncompress parameter is optional; if omitted, the local cached file will not
+be uncompressed. Note that the key string needs to have the appropriate
+file-name-like format and extension, so it can be uncompressed correctly.
+
+###### Example:
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+storm jar /home/y/lib/storm-starter/current/storm-starter-jar-with-dependencies.jar org.apache.storm.starter.clj.word_count test_topo -c topology.blobstore.map='{"key1":{"localname":"blob_file", "uncompress":false},"key2":{}}'
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Note: Please take care of the quotes.
+
+In the above example, we start the *word_count* topology (stored in the
+*storm-starter-jar-with-dependencies.jar* file), and ask it to have access
+to the cached file stored with key string = *key1*. This file would then be
+accessible to the topology as a local file called *blob_file*, and the
+supervisor will not try to uncompress the file. Note that in our example, the
+file's content originally came from *README.txt*. We also ask for the file
+stored with the key string = *key2* to be accessible to the topology. Since
+both the optional parameters are omitted, this file will get the local name =
+*key2*, and will not be uncompressed.
+
+### Updating a cached file
+
+It is possible for the cached files to be updated while topologies are running.
+The update happens in an eventual consistency model, where the supervisors poll
+Nimbus every supervisor.localizer.update.blob.interval.secs seconds and update their local copies. In the current version,
+it is the user's responsibility to check whether a new file is available.
+
+To update a cached file, use the following command. Contents come from a FILE or
+STDIN. Write access is required to be able to update a cached file.
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+storm blobstore update [-f|--file NEW_FILE] [KEYSTRING]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+###### Example:
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+storm blobstore update -f updates.txt key1
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In the above example, the topologies will be presented with the contents of the
+file *updates.txt* instead of *README.txt* (from the previous example), even
+though the topology still accesses it through a file called
+*blob_file*.
+
+### Removing a cached file
+
+To remove a file from the distributed cache, use the following command. Removing
+a file requires write access.
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+storm blobstore delete [KEYSTRING]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+### Listing Blobs currently in the distributed cache blobstore
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+storm blobstore list [KEY...]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Lists the blobs currently in the blobstore.
+
+### Reading the contents of a blob
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+storm blobstore cat [-f|--file FILE] KEY
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Reads a blob and then writes it either to a file or to STDOUT. Reading a blob
+requires read access.
+
+### Setting the access control for a blob
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+storm blobstore set-acl [-s ACL] KEY
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list
+(requires admin access).
+
+### Update the replication factor for a blob
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+storm blobstore replication --update --replication-factor 5 key1
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+### Read the replication factor of a blob
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+storm blobstore replication --read key1
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+### Command line help
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+storm help blobstore
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+## Using the Distributed Cache API from Java
+
+We start by getting a ClientBlobStore object as follows:
+
+```java
+Config theconf = new Config();
+theconf.putAll(Utils.readStormConfig());
+ClientBlobStore clientBlobStore = Utils.getClientBlobStore(theconf);
+```
+
+The required Utils package can be imported by:
+
+```java
+import org.apache.storm.utils.Utils;
+```
+
+ClientBlobStore and other blob-related classes can be imported by:
+
+```java
+import org.apache.storm.blobstore.ClientBlobStore;
+import org.apache.storm.blobstore.AtomicOutputStream;
+import org.apache.storm.blobstore.InputStreamWithMeta;
+import org.apache.storm.blobstore.BlobStoreAclHandler;
+import org.apache.storm.generated.*;
+```
+
+### Creating ACLs to be used for blobs
+
+```java
+String stringBlobACL = "u:username:rwa";
+AccessControl blobACL = BlobStoreAclHandler.parseAccessControl(stringBlobACL);
+List<AccessControl> acls = new LinkedList<>();
+acls.add(blobACL); // more ACLs can be added here
+SettableBlobMeta settableBlobMeta = new SettableBlobMeta(acls);
+settableBlobMeta.set_replication_factor(4); // Here we can set the replication factor
+```
+
+The settableBlobMeta object is what we need to create a blob in the next step.
+
+### Creating a blob
+
+```java
+AtomicOutputStream blobStream = clientBlobStore.createBlob("some_key", settableBlobMeta);
+blobStream.write("Some String or input data".getBytes());
+blobStream.close();
+```
+
+Note that the settableBlobMeta object here comes from the previous step, creating ACLs.
+For very large files, it is recommended that the user write the bytes in smaller chunks (for example 64 KB, up to 1 MB chunks).
+
+### Updating a blob
+
+Similar to creating a blob, but we get the AtomicOutputStream in a different way:
+
+```java
+String blobKey = "some_key";
+AtomicOutputStream blobStream = clientBlobStore.updateBlob(blobKey);
+```
+
+Pass a byte stream to the returned AtomicOutputStream as before.
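+
+For example, a complete update might look like the following (a minimal sketch that reuses the `clientBlobStore` object obtained earlier and mirrors the creation example above; the content string is just an illustration):
+
+```java
+String blobKey = "some_key";
+// updateBlob() returns an AtomicOutputStream, just like createBlob()
+AtomicOutputStream blobStream = clientBlobStore.updateBlob(blobKey);
+// write the new contents; closing the stream completes the update
+blobStream.write("Some updated content".getBytes());
+blobStream.close();
+```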
+
+### Updating the ACLs of a blob
+
+```java
+String blobKey = "some_key";
+AccessControl updateAcl = BlobStoreAclHandler.parseAccessControl("u:USER:--a");
+List<AccessControl> updateAcls = new LinkedList<>();
+updateAcls.add(updateAcl);
+SettableBlobMeta modifiedSettableBlobMeta = new SettableBlobMeta(updateAcls);
+clientBlobStore.setBlobMeta(blobKey, modifiedSettableBlobMeta);
+
+// Now set write only
+updateAcl = BlobStoreAclHandler.parseAccessControl("u:USER:-w-");
+updateAcls = new LinkedList<>();
+updateAcls.add(updateAcl);
+modifiedSettableBlobMeta = new SettableBlobMeta(updateAcls);
+clientBlobStore.setBlobMeta(blobKey, modifiedSettableBlobMeta);
+```
+
+### Updating and Reading the replication of a blob
+
+```java
+String blobKey = "some_key";
+BlobReplication replication = clientBlobStore.updateBlobReplication(blobKey, 5);
+int replication_factor = replication.get_replication();
+```
+
+Note: The replication factor gets updated and reflected only for the HDFS blobstore.
+
+### Reading a blob
+
+```java
+String blobKey = "some_key";
+InputStreamWithMeta blobInputStream = clientBlobStore.getBlob(blobKey);
+BufferedReader r = new BufferedReader(new InputStreamReader(blobInputStream));
+String blobContents = r.readLine(); // reads the first line of the blob's contents
+```
+
+### Deleting a blob
+
+```java
+String blobKey = "some_key";
+clientBlobStore.deleteBlob(blobKey);
+```
+
+### Getting a list of blob keys already in the blobstore
+
+```java
+Iterator<String> stringIterator = clientBlobStore.listKeys();
+```
+
+## Appendix A
+
+```java
+public abstract void prepare(Map conf, String baseDir);
+
+public abstract AtomicOutputStream createBlob(String key, SettableBlobMeta meta, Subject who) throws AuthorizationException, KeyAlreadyExistsException;
+
+public abstract AtomicOutputStream updateBlob(String key, Subject who) throws AuthorizationException, KeyNotFoundException;
+
+public abstract ReadableBlobMeta getBlobMeta(String key, Subject who) throws AuthorizationException, KeyNotFoundException;
+
+public abstract void setBlobMeta(String key, SettableBlobMeta meta, Subject who) throws AuthorizationException, KeyNotFoundException;
+
+public abstract void deleteBlob(String key, Subject who) throws AuthorizationException, KeyNotFoundException;
+
+public abstract InputStreamWithMeta getBlob(String key, Subject who) throws AuthorizationException, KeyNotFoundException;
+
+public abstract Iterator<String> listKeys(Subject who);
+
+public abstract BlobReplication getBlobReplication(String key, Subject who) throws Exception;
+
+public abstract BlobReplication updateBlobReplication(String key, int replication, Subject who) throws AuthorizationException, KeyNotFoundException, IOException;
+```
+
+## Appendix B
+
+```java
+public abstract void prepare(Map conf);
+
+protected abstract AtomicOutputStream createBlobToExtend(String key, SettableBlobMeta meta) throws AuthorizationException, KeyAlreadyExistsException;
+
+public abstract AtomicOutputStream updateBlob(String key) throws AuthorizationException, KeyNotFoundException;
+
+public abstract ReadableBlobMeta getBlobMeta(String key) throws AuthorizationException, KeyNotFoundException;
+
+protected abstract void setBlobMetaToExtend(String key, SettableBlobMeta meta) throws AuthorizationException, KeyNotFoundException;
+
+public abstract void deleteBlob(String key) throws AuthorizationException, KeyNotFoundException;
+
+public abstract InputStreamWithMeta getBlob(String key) throws AuthorizationException, KeyNotFoundException;
+
+public abstract Iterator<String> listKeys();
+
+public abstract void watchBlob(String key, IBlobWatcher watcher) throws AuthorizationException;
+
+public abstract void stopWatchingBlob(String key) throws AuthorizationException;
+
+public abstract BlobReplication getBlobReplication(String key) throws AuthorizationException, KeyNotFoundException;
+
+public abstract BlobReplication updateBlobReplication(String key, int replication) throws AuthorizationException, KeyNotFoundException;
+```
+
+## Appendix C
+
+```
+service Nimbus {
+...
+string beginCreateBlob(1: string key, 2: SettableBlobMeta meta) throws (1: AuthorizationException aze, 2: KeyAlreadyExistsException kae);
+
+string beginUpdateBlob(1: string key) throws (1: AuthorizationException aze, 2: KeyNotFoundException knf);
+
+void uploadBlobChunk(1: string session, 2: binary chunk) throws (1: AuthorizationException aze);
+
+void finishBlobUpload(1: string session) throws (1: AuthorizationException aze);
+
+void cancelBlobUpload(1: string session) throws (1: AuthorizationException aze);
+
+ReadableBlobMeta getBlobMeta(1: string key) throws (1: AuthorizationException aze, 2: KeyNotFoundException knf);
+
+void setBlobMeta(1: string key, 2: SettableBlobMeta meta) throws (1: AuthorizationException aze, 2: KeyNotFoundException knf);
+
+BeginDownloadResult beginBlobDownload(1: string key) throws (1: AuthorizationException aze, 2: KeyNotFoundException knf);
+
+binary downloadBlobChunk(1: string session) throws (1: AuthorizationException aze);
+
+void deleteBlob(1: string key) throws (1: AuthorizationException aze, 2: KeyNotFoundException knf);
+
+ListBlobsResult listBlobs(1: string session);
+
+BlobReplication getBlobReplication(1: string key) throws (1: AuthorizationException aze, 2: KeyNotFoundException knf);
+
+BlobReplication updateBlobReplication(1: string key, 2: i32 replication) throws (1: AuthorizationException aze, 2: KeyNotFoundException knf);
+...
+}
+
+struct BlobReplication {
+  1: required i32 replication;
+}
+
+exception AuthorizationException {
+  1: required string msg;
+}
+
+exception KeyNotFoundException {
+  1: required string msg;
+}
+
+exception KeyAlreadyExistsException {
+  1: required string msg;
+}
+
+enum AccessControlType {
+  OTHER = 1,
+  USER = 2
+  //eventually ,GROUP=3
+}
+
+struct AccessControl {
+  1: required AccessControlType type;
+  2: optional string name; //Name of user or group in ACL
+  3: required i32 access; //bitmasks READ=0x1, WRITE=0x2, ADMIN=0x4
+}
+
+struct SettableBlobMeta {
+  1: required list<AccessControl> acl;
+  2: optional i32 replication_factor;
+}
+
+struct ReadableBlobMeta {
+  1: required SettableBlobMeta settable;
+  //This is some indication of a version of a BLOB. The only guarantee is
+  //if the data changed in the blob the version will be different.
+  2: required i64 version;
+}
+
+struct ListBlobsResult {
+  1: required list<string> keys;
+  2: required string session;
+}
+
+struct BeginDownloadResult {
+  //Same version as in ReadableBlobMeta
+  1: required i64 version;
+  2: required string session;
+  3: optional i64 data_size;
+}
+```
diff --git a/docs/dynamic-log-level-settings.md b/docs/dynamic-log-level-settings.md
new file mode 100644
index 00000000000..65b2d0a9a99
--- /dev/null
+++ b/docs/dynamic-log-level-settings.md
@@ -0,0 +1,45 @@
+---
+title: Dynamic Log Level Settings
+layout: documentation
+documentation: true
+---
+
+
+We have added the ability to set log level settings for a running topology using the Storm UI and the Storm CLI.
+
+The log level settings apply the same way as you'd expect from log4j, as all we are doing is telling log4j to set the level of the logger you provide.
If you set the log level of a parent logger, the child loggers start using that level (unless the children already have a more restrictive level). A timeout can optionally be provided (for DEBUG mode it is required in the UI); when it expires, the workers reset the log level automatically.
+
+This revert action is triggered using a polling mechanism (every 30 seconds, but this is configurable), so you should expect your timeouts to be the value you provided plus anywhere between 0 and the polling interval.
+
+Using the Storm UI
+-------------
+
+In order to set a level, click on a running topology, and then click on “Change Log Level” in the Topology Actions section.
+
+![Change Log Level dialog](images/dynamic_log_level_settings_1.png "Change Log Level dialog")
+
+Next, provide the logger name, select the level you want (e.g. WARN), and a timeout in seconds (or 0 if not needed). Then click on “Add”.
+
+![After adding a log level setting](images/dynamic_log_level_settings_2.png "After adding a log level setting")
+
+To clear the log level, click on the “Clear” button. This reverts the log level back to what it was before you added the setting. The log level line will disappear from the UI.
+
+While there is a delay resetting log levels back, setting the log level in the first place is immediate (or as quickly as the message can travel from the UI/CLI to the workers by way of nimbus and zookeeper).
+
+Using the CLI
+-------------
+
+Using the CLI, issue the command:
+
+`./bin/storm set_log_level [topology name] -l [logger name]=[LEVEL]:[TIMEOUT]`
+
+For example:
+
+`./bin/storm set_log_level my_topology -l ROOT=DEBUG:30`
+
+Sets the ROOT logger to DEBUG for 30 seconds.
+
+`./bin/storm set_log_level my_topology -r ROOT`
+
+Clears the ROOT logger dynamic log level, resetting it to its original value.
+
diff --git a/docs/dynamic-worker-profiling.md b/docs/dynamic-worker-profiling.md
new file mode 100644
index 00000000000..f1b83e908a2
--- /dev/null
+++ b/docs/dynamic-worker-profiling.md
@@ -0,0 +1,37 @@
+---
+title: Dynamic Worker Profiling
+layout: documentation
+documentation: true
+---
+
+
+In multi-tenant mode, storm launches long-running JVMs across the cluster without sudo access for users. Self-service Java heap dumps, jstacks and Java profiling of these JVMs improve users' ability to analyze and debug issues while actively monitoring them.
+
+The storm dynamic profiler lets you dynamically take heap dumps, jprofile snapshots or jstacks of a worker JVM running on a stock cluster. It lets users download these dumps from the browser and analyze them with their favorite tools. The UI component page provides a list of workers for the component along with action buttons. The logviewer lets you download the dumps generated by these actions. Please see the screenshots for more information.
+
+Using the Storm UI
+-------------
+
+In order to request a heap dump, jstack, jprofile start/stop/dump, or a worker restart, click on a running topology, then click on the specific component, then select workers by checking the box of any of the worker's executors in the Executors table, and then click on “Start”, “Heap”, “Jstack” or “Restart Worker” in the "Profiling and Debugging" section.
+
+![Selecting Workers](images/dynamic_profiling_debugging_4.png "Selecting Workers")
+
+In the Executors table, click the checkbox in the Actions column next to any executor, and any other executors belonging to the same worker are automatically selected.
When the action has completed, any output files created will be available at the link in the Actions column.
+
+![Profiling and Debugging](images/dynamic_profiling_debugging_1.png "Profiling and Debugging")
+
+To start jprofile, provide a timeout in minutes (or 10 if not needed). Then click on “Start”.
+
+![After starting jprofile for worker](images/dynamic_profiling_debugging_2.png "After jprofile for worker ")
+
+To stop the jprofile logging, click on the “Stop” button. This dumps the jprofile stats and stops the profiling. Refresh the page for the line to disappear from the UI.
+
+Click on "My Dump Files" to go to the logviewer UI for a list of worker-specific dump files.
+
+![Dump Files Links for worker](images/dynamic_profiling_debugging_3.png "Dump Files Links for worker")
+
+Configuration
+-------------
+
+The "worker.profiler.command" setting can be configured to point to a specific pluggable profiler or heap-dump command. "worker.profiler.enabled" can be set to false if the plugin is not available or the JDK does not support Java Flight Recorder, so that the worker JVM options will not include "worker.profiler.childopts". To use a different profiler plugin, change these configurations.
+
diff --git a/docs/favicon.ico b/docs/favicon.ico
new file mode 100644
index 00000000000..7149e2eba60
Binary files /dev/null and b/docs/favicon.ico differ
diff --git a/docs/flux.md b/docs/flux.md
new file mode 100644
index 00000000000..8b7f09d0939
--- /dev/null
+++ b/docs/flux.md
@@ -0,0 +1,869 @@
+---
+title: Flux
+layout: documentation
+documentation: true
+---
+
+A framework for creating and deploying Apache Storm streaming computations with less friction.
+
+## Definition
+**flux** |fləks| _noun_
+
+1. The action or process of flowing or flowing out
+2. Continuous change
+3. In physics, the rate of flow of a fluid, radiant energy, or particles across a given area
+4. A substance mixed with a solid to lower its melting point
+
+## Rationale
+Bad things happen when configuration is hard-coded. No one should have to recompile or repackage an application in
+order to change configuration.
+
+## About
+Flux is a framework and set of utilities that make defining and deploying Apache Storm topologies less painful and
+developer-intensive. One of the pain points often mentioned is the fact that the wiring for a topology graph is often tied up in Java code,
+and that any changes require recompilation and repackaging of the topology jar file. Flux aims to alleviate that
+pain by allowing you to package all your Storm components in a single jar, and use an external text file to define
+the layout and configuration of your topologies.
+
+## Features
+
+ * Easily configure and deploy Storm topologies (both Storm core and Microbatch API) without embedding configuration
+   in your topology code
+ * Support for existing topology code (see below)
+ * Define Storm Core API (Spouts/Bolts) using a flexible YAML DSL
+ * YAML DSL support for most Storm components (storm-kafka-client, storm-hdfs, storm-hbase, etc.)
+ * Convenient support for multi-lang components
+ * External property substitution/filtering for easily switching between configurations/environments (similar to Maven-style
+   `${variable.name}` substitution)
+
+## Usage
+
+To use Flux, add it as a dependency and package all your Storm components in a fat jar, then create a YAML document
+to define your topology (see below for YAML configuration options).
+
+### Building from Source
+The easiest way to use Flux is to add it as a Maven dependency in your project, as described below.
+
+If you would like to build Flux from source and run the unit/integration tests, you will need the following installed
+on your system:
+
+* Python 3.0.x or later
+* Node.js 0.10.x or later
+
+#### Building with unit tests enabled:
+
+```
+mvn clean install
+```
+
+#### Building with unit tests disabled:
+If you would like to build Flux without installing Python or Node.js, you can simply skip the unit tests:
+
+```
+mvn clean install -DskipTests=true
+```
+
+Note that if you plan on using Flux to deploy topologies to a remote cluster, you will still need to have Python
+installed since it is required by Apache Storm.
+
+
+#### Building with integration tests enabled:
+
+```
+mvn clean install -DskipIntegration=false
+```
+
+
+### Packaging with Maven
+To enable Flux for your Storm components, you need to add it as a dependency such that it's included in the Storm
+topology jar. This can be accomplished with the Maven shade plugin (preferred) or the Maven assembly plugin (not
+recommended).
+
+#### Flux Maven Dependency
+The current version of Flux is available in Maven Central at the following coordinates:
+```xml
+<dependency>
+    <groupId>org.apache.storm</groupId>
+    <artifactId>flux-core</artifactId>
+    <version>${storm.version}</version>
+</dependency>
+```
+
+Using shell spouts and bolts requires the additional Flux Wrappers library:
+```xml
+<dependency>
+    <groupId>org.apache.storm</groupId>
+    <artifactId>flux-wrappers</artifactId>
+    <version>${storm.version}</version>
+</dependency>
+```
+
+#### Creating a Flux-Enabled Topology JAR
+The example below illustrates Flux usage with the Maven shade plugin:
+
+ ```xml
+<dependencies>
+    <dependency>
+        <groupId>org.apache.storm</groupId>
+        <artifactId>flux-core</artifactId>
+        <version>${storm.version}</version>
+    </dependency>
+    <dependency>
+        <groupId>org.apache.storm</groupId>
+        <artifactId>flux-wrappers</artifactId>
+        <version>${storm.version}</version>
+    </dependency>
+</dependencies>
+
+<build>
+    <plugins>
+        <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-shade-plugin</artifactId>
+            <version>1.4</version>
+            <configuration>
+                <createDependencyReducedPom>true</createDependencyReducedPom>
+            </configuration>
+            <executions>
+                <execution>
+                    <phase>package</phase>
+                    <goals>
+                        <goal>shade</goal>
+                    </goals>
+                    <configuration>
+                        <transformers>
+                            <transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
+                            <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                                <mainClass>org.apache.storm.flux.Flux</mainClass>
+                            </transformer>
+                        </transformers>
+                    </configuration>
+                </execution>
+            </executions>
+        </plugin>
+    </plugins>
+</build>
+ ```
+
+### Deploying and Running a Flux Topology
+Once your topology components are packaged with the Flux dependency, you can run different topologies either locally
+or remotely using the `storm jar` command. For example, if your fat jar is named `myTopology-0.1.0-SNAPSHOT.jar` you
+could run it locally with the command:
+
+
+```bash
+storm jar myTopology-0.1.0-SNAPSHOT.jar org.apache.storm.flux.Flux --local my_config.yaml
+
+```
+
+### Command line options
+```
+usage: storm jar <my_topology_uber_jar.jar> org.apache.storm.flux.Flux
+             [options] <topology-file>
+   -d,--dry-run                 Do not run or deploy the topology. Just
+                                build, validate, and print information about
+                                the topology.
+   -e,--env-filter              Perform environment variable substitution.
+                                Keys identified with `${ENV-[NAME]}` will be
+                                replaced with the corresponding `NAME`
+                                environment value.
+   -f,--filter <file>           Perform property substitution. Use the
+                                specified file as a source of properties,
+                                and replace keys identified with {$[property
+                                name]} with the value defined in the
+                                properties file.
+   -i,--inactive                Deploy the topology, but do not activate it.
+   -l,--local                   Run the topology in local mode.
+   -n,--no-splash               Suppress the printing of the splash screen.
+   -q,--no-detail               Suppress the printing of topology details.
+   -r,--remote                  Deploy the topology to a remote cluster.
+   -R,--resource                Treat the supplied path as a classpath
+                                resource instead of a file.
+   -s,--sleep <ms>              When running locally, the amount of time to
+                                sleep (in ms.) before killing the topology
+                                and shutting down the local cluster.
+   -z,--zookeeper <host>:<port> When running in local mode, use the
+                                ZooKeeper at the specified <host>:<port>
+                                instead of the in-process ZooKeeper.
+                                (requires Storm 0.9.3 or later)
+```
+
+**NOTE:** Flux tries to avoid command line switch collisions with the `storm` command, and allows any other command line
+switches to pass through to the `storm` command.
+
+For example, you can use the `storm` command switch `-c` to override a topology configuration property. The following
+example command will run Flux and override the `nimbus.seeds` configuration:
+
+```bash
+storm jar myTopology-0.1.0-SNAPSHOT.jar org.apache.storm.flux.Flux --remote my_config.yaml -c 'nimbus.seeds=["localhost"]'
+```
+
+### Sample output
+```
+███████╗██╗     ██╗   ██╗██╗  ██╗
+██╔════╝██║     ██║   ██║╚██╗██╔╝
+█████╗  ██║     ██║   ██║ ╚███╔╝
+██╔══╝  ██║     ██║   ██║ ██╔██╗
+██║     ███████╗╚██████╔╝██╔╝ ██╗
+╚═╝     ╚══════╝ ╚═════╝ ╚═╝  ╚═╝
++-         Apache Storm        -+
++-  data FLow User eXperience  -+
+Version: 0.3.0
+Parsing file: /Users/hsimpson/Projects/donut_domination/storm/shell_test.yaml
+---------- TOPOLOGY DETAILS ----------
+Name: shell-topology
+--------------- SPOUTS ---------------
+sentence-spout[1](org.apache.storm.flux.wrappers.spouts.FluxShellSpout)
+---------------- BOLTS ---------------
+splitsentence[1](org.apache.storm.flux.wrappers.bolts.FluxShellBolt)
+log[1](org.apache.storm.flux.wrappers.bolts.LogInfoBolt)
+count[1](org.apache.storm.testing.TestWordCounter)
+--------------- STREAMS ---------------
+sentence-spout --SHUFFLE--> splitsentence
+splitsentence --FIELDS--> count
+count --SHUFFLE--> log
+--------------------------------------
+Submitting topology: 'shell-topology' to remote cluster...
+```
+
+## YAML Configuration
+Flux topologies are defined in a YAML file that describes a topology. A Flux topology
+definition consists of the following:
+
+  1. A topology name
+  2. A list of topology "components" (named Java objects that will be made available in the environment)
+  3. **EITHER** (a DSL topology definition):
+      * A list of spouts, each identified by a unique ID
+      * A list of bolts, each identified by a unique ID
+      * A list of "stream" objects representing a flow of tuples between spouts and bolts
+      * (Optional) A list of "workerHooks", each identified by a unique ID
+  4. **OR** (a JVM class that can produce a `org.apache.storm.generated.StormTopology` instance):
+      * A `topologySource` definition.
+
+
+
+For example, here is a simple definition of a wordcount topology using the YAML DSL:
+
+```yaml
+name: "yaml-topology"
+config:
+  topology.workers: 1
+
+# spout definitions
+spouts:
+  - id: "spout-1"
+    className: "org.apache.storm.testing.TestWordSpout"
+    parallelism: 1
+
+# bolt definitions
+bolts:
+  - id: "bolt-1"
+    className: "org.apache.storm.testing.TestWordCounter"
+    parallelism: 1
+  - id: "bolt-2"
+    className: "org.apache.storm.flux.wrappers.bolts.LogInfoBolt"
+    parallelism: 1
+
+#stream definitions
+streams:
+  - name: "spout-1 --> bolt-1" # name isn't used (placeholder for logging, UI, etc.)
+    from: "spout-1"
+    to: "bolt-1"
+    grouping:
+      type: FIELDS
+      args: ["word"]
+
+  - name: "bolt-1 --> bolt2"
+    from: "bolt-1"
+    to: "bolt-2"
+    grouping:
+      type: SHUFFLE
+
+# worker hook definitions
+workerHooks:
+  - id: "base-worker-hook"
+    className: "org.apache.storm.hooks.BaseWorkerHook"
+
+```
+## Property Substitution/Filtering
+It's common for developers to want to easily switch between configurations, for example switching deployment between
+a development environment and a production environment.
This can be accomplished by using separate YAML configuration
+files, but that approach would lead to unnecessary duplication, especially in situations where the Storm topology
+does not change, but configuration settings such as host names, ports, and parallelism parameters do.
+
+For this case, Flux offers properties filtering to allow you to externalize values to a `.properties` file and have
+them substituted before the `.yaml` file is parsed.
+
+To enable property filtering, use the `--filter` command line option and specify a `.properties` file. For example,
+if you invoked flux like so:
+
+```bash
+storm jar myTopology-0.1.0-SNAPSHOT.jar org.apache.storm.flux.Flux --local my_config.yaml --filter dev.properties
+```
+With the following `dev.properties` file:
+
+```properties
+kafka.zookeeper.hosts: localhost:2181
+```
+
+You would then be able to reference those properties by key in your `.yaml` file using `${}` syntax:
+
+```yaml
+  - id: "zkHosts"
+    className: "org.apache.storm.kafka.ZkHosts"
+    constructorArgs:
+      - "${kafka.zookeeper.hosts}"
+```
+
+In this case, Flux would replace `${kafka.zookeeper.hosts}` with `localhost:2181` before parsing the YAML contents.
+
+### Environment Variable Substitution/Filtering
+Flux also allows environment variable substitution. For example, if an environment variable named `ZK_HOSTS` is defined,
+you can reference it in a Flux YAML file with the following syntax:
+
+```
+${ENV-ZK_HOSTS}
+```
+
+## Components
+Components are essentially named object instances that are made available as configuration options for spouts and
+bolts. If you are familiar with the Spring framework, components are roughly analogous to Spring beans.
+
+Every component is identified, at a minimum, by a unique identifier (String) and a class name (String). For example,
+the following will make an instance of the `org.apache.storm.kafka.StringScheme` class available as a reference under the key
+`"stringScheme"`. This assumes the `org.apache.storm.kafka.StringScheme` class has a default constructor.
+
+```yaml
+components:
+  - id: "stringScheme"
+    className: "org.apache.storm.kafka.StringScheme"
+```
+
+### Static factory methods
+It is also possible to use static factory methods from Flux. Given the following Java code:
+
+```java
+public class TestBolt extends BaseBasicBolt {
+    public static TestBolt newInstance(Duration triggerTime) {
+        return new TestBolt(triggerTime);
+    }
+}
+```
+
+```java
+public class Duration {
+    public static Duration ofSeconds(long seconds) {
+        return new Duration(seconds);
+    }
+}
+```
+
+it is possible to use the factory methods as follows:
+
+```yaml
+components:
+  - id: "time"
+    className: "java.time.Duration"
+    factory: "ofSeconds"
+
+bolts:
+  - id: "testBolt"
+    className: "org.apache.storm.flux.test.TestBolt"
+    factory: "newInstance"
+    factoryArgs:
+      - ref: "time"
+```
+
+### Constructor Arguments, References, Properties and Configuration Methods
+
+#### Constructor Arguments
+Arguments to a class constructor can be configured by adding a `constructorArgs` element to a component.
+`constructorArgs` is a list of objects that will be passed to the class' constructor. The following example creates an
+object by calling the constructor that takes a single string as an argument:
+
+```yaml
+  - id: "zkHosts"
+    className: "org.apache.storm.kafka.ZkHosts"
+    constructorArgs:
+      - "localhost:2181"
+```
+
+#### References
+Each component instance is identified by a unique id that allows it to be used/reused by other components.
+To reference an existing component, you specify the id of the component with the `ref` tag.
+
+In the following example, a component with the id `"stringScheme"` is created, and later referenced as an argument
+to another component's constructor:
+
+```yaml
+components:
+  - id: "stringScheme"
+    className: "org.apache.storm.kafka.StringScheme"
+
+  - id: "stringMultiScheme"
+    className: "org.apache.storm.spout.SchemeAsMultiScheme"
+    constructorArgs:
+      - ref: "stringScheme" # component with id "stringScheme" must be declared above.
+```
+**N.B.:** References can only be used after (below) the object they point to has been declared.
+
+#### Properties
+In addition to calling constructors with different arguments, Flux also allows you to configure components using
+JavaBean-like setter methods and fields declared as `public`:
+
+```yaml
+  - id: "spoutConfig"
+    className: "org.apache.storm.kafka.SpoutConfig"
+    constructorArgs:
+      # brokerHosts
+      - ref: "zkHosts"
+      # topic
+      - "myKafkaTopic"
+      # zkRoot
+      - "/kafkaSpout"
+      # id
+      - "myId"
+    properties:
+      - name: "ignoreZkOffsets"
+        value: true
+      - name: "scheme"
+        ref: "stringMultiScheme"
+```
+
+In the example above, the `properties` declaration will cause Flux to look for a public method in the `SpoutConfig` class with
+the signature `setIgnoreZkOffsets(boolean b)` and attempt to invoke it. If a setter method is not found, Flux will then
+look for a public instance variable with the name `ignoreZkOffsets` and attempt to set its value.
+
+References may also be used as property values.
+
+#### Configuration Methods
+Conceptually, configuration methods are similar to Properties and Constructor Args -- they allow you to invoke an
+arbitrary method on an object after it is constructed. Configuration methods are useful for working with classes that
+don't expose JavaBean methods or have constructors that can fully configure the object. Common examples include classes
+that use the builder pattern for configuration/composition.
+
+The following YAML example creates a bolt and configures it by calling several methods:
+
+```yaml
+bolts:
+  - id: "bolt-1"
+    className: "org.apache.storm.flux.test.TestBolt"
+    parallelism: 1
+    configMethods:
+      - name: "withFoo"
+        args:
+          - "foo"
+      - name: "withBar"
+        args:
+          - "bar"
+      - name: "withFooBar"
+        args:
+          - "foo"
+          - "bar"
+```
+
+The signatures of the corresponding methods are as follows:
+
+```java
+    public void withFoo(String foo);
+    public void withBar(String bar);
+    public void withFooBar(String foo, String bar);
+```
+
+Arguments passed to configuration methods work much the same way as constructor arguments, and support references as
+well.
+
+### Using Java `enum`s in Constructor Arguments, References, Properties and Configuration Methods
+You can easily use Java `enum` values as arguments in a Flux YAML file, simply by referencing the name of the `enum`.
+
+For example, [Storm's HDFS module](storm-hdfs.html) includes the following `enum` definition (simplified for brevity):
+
+```java
+public static enum Units {
+    KB, MB, GB, TB
+}
+```
+
+And the `org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy` class has the following constructor:
+
+```java
+public FileSizeRotationPolicy(float count, Units units)
+
+```
+The following Flux `component` definition could be used to call the constructor:
+
+```yaml
+  - id: "rotationPolicy"
+    className: "org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy"
+    constructorArgs:
+      - 5.0
+      - MB
+```
+
+The above definition is functionally equivalent to the following Java code:
+
+```java
+// rotate files when they reach 5MB
+FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
+```
+
+## Topology Config
+The `config` section is simply a map of Storm topology configuration parameters that will be passed to the
+`org.apache.storm.StormSubmitter` as an instance of the `org.apache.storm.Config` class:
+
+```yaml
+config:
+  topology.workers: 4
+  topology.max.spout.pending: 1000
+  topology.message.timeout.secs: 30
+```
+
+# Existing Topologies
+If you have existing Storm topologies, you can still use Flux to deploy/run/test them. This feature allows you to
+leverage Flux Constructor Arguments, References, Properties, and Topology Config declarations for existing topology
+classes.
+
+The easiest way to use an existing topology class is to define
+a `getTopology()` instance method with one of the following signatures:
+
+```java
+public StormTopology getTopology(Map<String, Object> config)
+```
+or:
+
+```java
+public StormTopology getTopology(Config config)
+```
+
+You could then use the following YAML to configure your topology:
+
+```yaml
+name: "existing-topology"
+topologySource:
+  className: "org.apache.storm.flux.test.SimpleTopology"
+```
+
+If the class you would like to use as a topology source has a different method name (i.e. not `getTopology`), you can
+override it:
+
+```yaml
+name: "existing-topology"
+topologySource:
+  className: "org.apache.storm.flux.test.SimpleTopology"
+  methodName: "getTopologyWithDifferentMethodName"
+```
+
+__N.B.:__ The specified method must accept a single argument of type `java.util.Map` or
+`org.apache.storm.Config`, and return an `org.apache.storm.generated.StormTopology` object.
+
+# YAML DSL
+## Spouts and Bolts
+Spouts and bolts are configured in their own respective sections of the YAML configuration. Spout and bolt definitions
+are extensions to the `component` definition that add a `parallelism` parameter that sets the parallelism for a
+component when the topology is deployed.
+
+Because spout and bolt definitions extend `component` they support constructor arguments, references, and properties as
+well.
Shell spout example:

```yaml
spouts:
  - id: "sentence-spout"
    className: "org.apache.storm.flux.wrappers.spouts.FluxShellSpout"
    # shell spout constructor takes 2 arguments: String[], String[]
    constructorArgs:
      # command line
      - ["node", "randomsentence.js"]
      # output fields
      - ["word"]
    parallelism: 1
```

Kafka spout example:

```yaml
components:
  - id: "stringScheme"
    className: "org.apache.storm.kafka.StringScheme"

  - id: "stringMultiScheme"
    className: "org.apache.storm.spout.SchemeAsMultiScheme"
    constructorArgs:
      - ref: "stringScheme"

  - id: "zkHosts"
    className: "org.apache.storm.kafka.ZkHosts"
    constructorArgs:
      - "localhost:2181"

# Alternative kafka config
#  - id: "kafkaConfig"
#    className: "org.apache.storm.kafka.KafkaConfig"
#    constructorArgs:
#      # brokerHosts
#      - ref: "zkHosts"
#      # topic
#      - "myKafkaTopic"
#      # clientId (optional)
#      - "myKafkaClientId"

  - id: "spoutConfig"
    className: "org.apache.storm.kafka.SpoutConfig"
    constructorArgs:
      # brokerHosts
      - ref: "zkHosts"
      # topic
      - "myKafkaTopic"
      # zkRoot
      - "/kafkaSpout"
      # id
      - "myId"
    properties:
      - name: "ignoreZkOffsets"
        value: true
      - name: "scheme"
        ref: "stringMultiScheme"

config:
  topology.workers: 1

# spout definitions
spouts:
  - id: "kafka-spout"
    className: "org.apache.storm.kafka.KafkaSpout"
    constructorArgs:
      - ref: "spoutConfig"

```

Bolt examples:

```yaml
# bolt definitions
bolts:
  - id: "splitsentence"
    className: "org.apache.storm.flux.wrappers.bolts.FluxShellBolt"
    constructorArgs:
      # command line
      - ["python3", "splitsentence.py"]
      # output fields
      - ["word"]
    parallelism: 1
    # ...

  - id: "log"
    className: "org.apache.storm.flux.wrappers.bolts.LogInfoBolt"
    parallelism: 1
    # ...

  - id: "count"
    className: "org.apache.storm.testing.TestWordCounter"
    parallelism: 1
    # ...
```
## Streams and Stream Groupings
Streams in Flux are represented as a list of connections (graph edges, data flow, etc.) between the spouts and bolts in
a topology, with an associated grouping definition.

A stream definition has the following properties:

**`name`:** A name for the connection (optional, currently unused)

**`from`:** The `id` of a Spout or Bolt that is the source (publisher)

**`to`:** The `id` of a Spout or Bolt that is the destination (subscriber)

**`grouping`:** The stream grouping definition for the Stream

A grouping definition has the following properties:

**`type`:** The type of grouping. One of `ALL`, `CUSTOM`, `DIRECT`, `SHUFFLE`, `LOCAL_OR_SHUFFLE`, `FIELDS`, `GLOBAL`, or `NONE`.

**`streamId`:** The Storm stream ID (optional; if unspecified, the default stream will be used)

**`args`:** For the `FIELDS` grouping, a list of field names.

**`customClass`:** For the `CUSTOM` grouping, a definition of a custom grouping class instance

The `streams` definition example below sets up a topology with the following wiring:

```
    kafka-spout --> splitsentence --> count --> log
```


```yaml
# stream definitions
# stream definitions define connections between spouts and bolts.
# note that such connections can be cyclical
# custom stream groupings are also supported

streams:
  - name: "kafka --> split" # name isn't used (placeholder for logging, UI, etc.)
+ from: "kafka-spout" + to: "splitsentence" + grouping: + type: SHUFFLE + + - name: "split --> count" + from: "splitsentence" + to: "count" + grouping: + type: FIELDS + args: ["word"] + + - name: "count --> log" + from: "count" + to: "log" + grouping: + type: SHUFFLE +``` + +### Custom Stream Groupings +Custom stream groupings are defined by setting the grouping type to `CUSTOM` and defining a `customClass` parameter +that tells Flux how to instantiate the custom class. The `customClass` definition extends `component`, so it supports +constructor arguments, references, and properties as well. + +The example below creates a Stream with an instance of the `org.apache.storm.testing.NGrouping` custom stream grouping +class. + +```yaml + - name: "bolt-1 --> bolt2" + from: "bolt-1" + to: "bolt-2" + grouping: + type: CUSTOM + customClass: + className: "org.apache.storm.testing.NGrouping" + constructorArgs: + - 1 +``` + +## Includes and Overrides +Flux allows you to include the contents of other YAML files, and have them treated as though they were defined in the +same file. Includes may be either files, or classpath resources. + +Includes are specified as a list of maps: + +```yaml +includes: + - resource: false + file: "src/test/resources/configs/shell_test.yaml" + override: false +``` + +If the `resource` property is set to `true`, the include will be loaded as a classpath resource from the value of the +`file` attribute, otherwise it will be treated as a regular file. + +The `override` property controls how includes affect the values defined in the current file. If `override` is set to +`true`, values in the included file will replace values in the current file being parsed. If `override` is set to +`false`, values in the current file being parsed will take precedence, and the parser will refuse to replace them. + +**N.B.:** Includes are not yet recursive. Includes from included files will be ignored. + +## Worker Hooks +Flux allows you to attach topology components that can be executed when a worker starts, and when a worker shuts down. It can be useful when you want to execute operations before topology processing starts, or cleanup operations before your workers shut down, e.g. managing application context. Worker Hooks should be an implementation of [IWorkerHook]({{page.git-blob-base}}/storm-client/src/jvm/org/apache/storm/hooks/IWorkerHook.java). Other than that, they follow similar Bean definition semantics as [Components](##Components) for declaration within yaml file. 
Worker hooks are specified as a list of bean definitions:

```yaml
workerHooks:
  - id: "base-worker-hook"
    className: "org.apache.storm.hooks.BaseWorkerHook"
```

## Basic Word Count Example

This example uses a spout implemented in JavaScript, a bolt implemented in Python, and a bolt implemented in Java.

Topology YAML config:

```yaml
---
name: "shell-topology"
config:
  topology.workers: 1

# spout definitions
spouts:
  - id: "sentence-spout"
    className: "org.apache.storm.flux.wrappers.spouts.FluxShellSpout"
    # shell spout constructor takes 2 arguments: String[], String[]
    constructorArgs:
      # command line
      - ["node", "randomsentence.js"]
      # output fields
      - ["word"]
    parallelism: 1

# bolt definitions
bolts:
  - id: "splitsentence"
    className: "org.apache.storm.flux.wrappers.bolts.FluxShellBolt"
    constructorArgs:
      # command line
      - ["python3", "splitsentence.py"]
      # output fields
      - ["word"]
    parallelism: 1

  - id: "log"
    className: "org.apache.storm.flux.wrappers.bolts.LogInfoBolt"
    parallelism: 1

  - id: "count"
    className: "org.apache.storm.testing.TestWordCounter"
    parallelism: 1

# stream definitions
# stream definitions define connections between spouts and bolts.
# note that such connections can be cyclical
# custom stream groupings are also supported

streams:
  - name: "spout --> split" # name isn't used (placeholder for logging, UI, etc.)
    from: "sentence-spout"
    to: "splitsentence"
    grouping:
      type: SHUFFLE

  - name: "split --> count"
    from: "splitsentence"
    to: "count"
    grouping:
      type: FIELDS
      args: ["word"]

  - name: "count --> log"
    from: "count"
    to: "log"
    grouping:
      type: SHUFFLE
```


## Micro-Batching (Trident) API Support
Currently, the Flux YAML DSL only supports the Core Storm API, but support for Storm's micro-batching (Trident) API is planned.

In the meantime, to use Flux with a Trident topology, define a topology getter method and reference it in your YAML config:

```yaml
name: "my-trident-topology"

config:
  topology.workers: 1

topologySource:
  className: "org.apache.storm.flux.test.TridentTopologySource"
  # Flux will look for "getTopology", this will override that.
  methodName: "getTopologyWithDifferentMethodName"
```
diff --git a/docs/images/Storm-JMS-Example.png b/docs/images/Storm-JMS-Example.png new file mode 100644 index 00000000000..80e34932961 Binary files /dev/null and b/docs/images/Storm-JMS-Example.png differ diff --git a/docs/images/ack_tree.png b/docs/images/ack_tree.png new file mode 100644 index 00000000000..2134cc8a82d Binary files /dev/null and b/docs/images/ack_tree.png differ diff --git a/docs/images/architecture.png b/docs/images/architecture.png new file mode 100644 index 00000000000..caf97e0b8db Binary files /dev/null and b/docs/images/architecture.png differ diff --git a/docs/images/architecture.svg b/docs/images/architecture.svg new file mode 100644 index 00000000000..7da17440516 --- /dev/null +++ b/docs/images/architecture.svg @@ -0,0 +1,1458 @@
[SVG markup elided; recoverable text content: architecture diagram whose only text labels are repeated "Tuple" tags]
diff --git a/docs/images/batched-stream.png b/docs/images/batched-stream.png new file mode 100644 index 00000000000..1e6aa01f221 Binary files /dev/null and b/docs/images/batched-stream.png differ diff --git a/docs/images/bolt.png b/docs/images/bolt.png new file mode 100644 index 00000000000..7f89d1712d1 Binary files /dev/null and b/docs/images/bolt.png differ diff --git a/docs/images/bolt.svg b/docs/images/bolt.svg new file mode 100644 index 00000000000..5b8adb315e5 --- /dev/null +++ b/docs/images/bolt.svg @@ -0,0 +1,743 @@
[SVG markup elided; recoverable text content: bolt diagram whose only text labels are repeated "Tuple" tags]
diff --git a/docs/images/bullet.gif b/docs/images/bullet.gif new file mode 100644 index 00000000000..45bb956c39b Binary files /dev/null and b/docs/images/bullet.gif differ diff --git a/docs/images/disable-event-logging-topology.png b/docs/images/disable-event-logging-topology.png new file mode 100644 index 00000000000..77405edf403 Binary files /dev/null and b/docs/images/disable-event-logging-topology.png differ diff --git a/docs/images/download.png b/docs/images/download.png new file mode 100644 index 00000000000..5e07c78ca85 Binary files /dev/null and b/docs/images/download.png differ diff --git a/docs/images/drpc-workflow.png b/docs/images/drpc-workflow.png new file mode 100644 index 00000000000..99056487a21 Binary files /dev/null and b/docs/images/drpc-workflow.png differ diff --git a/docs/images/dynamic_log_level_settings_1.png b/docs/images/dynamic_log_level_settings_1.png new file mode 100644 index 00000000000..71d42e74780 Binary files /dev/null and b/docs/images/dynamic_log_level_settings_1.png differ diff --git a/docs/images/dynamic_log_level_settings_2.png b/docs/images/dynamic_log_level_settings_2.png new file mode 100644 index 00000000000..d0e61a7d0e5 Binary files /dev/null and b/docs/images/dynamic_log_level_settings_2.png differ diff --git
a/docs/images/dynamic_profiling_debugging_1.png b/docs/images/dynamic_profiling_debugging_1.png new file mode 100644 index 00000000000..6be1f86381e Binary files /dev/null and b/docs/images/dynamic_profiling_debugging_1.png differ diff --git a/docs/images/dynamic_profiling_debugging_2.png b/docs/images/dynamic_profiling_debugging_2.png new file mode 100644 index 00000000000..342ad9439e7 Binary files /dev/null and b/docs/images/dynamic_profiling_debugging_2.png differ diff --git a/docs/images/dynamic_profiling_debugging_3.png b/docs/images/dynamic_profiling_debugging_3.png new file mode 100644 index 00000000000..5706d7e83a7 Binary files /dev/null and b/docs/images/dynamic_profiling_debugging_3.png differ diff --git a/docs/images/dynamic_profiling_debugging_4.png b/docs/images/dynamic_profiling_debugging_4.png new file mode 100644 index 00000000000..0afe9f47963 Binary files /dev/null and b/docs/images/dynamic_profiling_debugging_4.png differ diff --git a/docs/images/eclipse-project-properties.png b/docs/images/eclipse-project-properties.png new file mode 100644 index 00000000000..62f8d32b628 Binary files /dev/null and b/docs/images/eclipse-project-properties.png differ diff --git a/docs/images/enable-event-logging-spout.png b/docs/images/enable-event-logging-spout.png new file mode 100644 index 00000000000..081bab1467d Binary files /dev/null and b/docs/images/enable-event-logging-spout.png differ diff --git a/docs/images/enable-event-logging-topology.png b/docs/images/enable-event-logging-topology.png new file mode 100644 index 00000000000..dc8ee58c5c2 Binary files /dev/null and b/docs/images/enable-event-logging-topology.png differ diff --git a/docs/images/event-logs-view.png b/docs/images/event-logs-view.png new file mode 100644 index 00000000000..104f35c3bf2 Binary files /dev/null and b/docs/images/event-logs-view.png differ diff --git a/docs/images/example-of-a-running-topology.png b/docs/images/example-of-a-running-topology.png new file mode 100644 index 00000000000..1462b21a250 Binary files /dev/null and b/docs/images/example-of-a-running-topology.png differ diff --git a/docs/images/footer-bg.png b/docs/images/footer-bg.png new file mode 100644 index 00000000000..e72d575af49 Binary files /dev/null and b/docs/images/footer-bg.png differ diff --git a/docs/images/grouping.png b/docs/images/grouping.png new file mode 100644 index 00000000000..39112869a90 Binary files /dev/null and b/docs/images/grouping.png differ diff --git a/docs/images/hdfs_blobstore.png b/docs/images/hdfs_blobstore.png new file mode 100644 index 00000000000..11c5c1072ba Binary files /dev/null and b/docs/images/hdfs_blobstore.png differ diff --git a/docs/images/header-bg.png b/docs/images/header-bg.png new file mode 100644 index 00000000000..01a291e8fa4 Binary files /dev/null and b/docs/images/header-bg.png differ diff --git a/docs/images/incubator-logo.png b/docs/images/incubator-logo.png new file mode 100644 index 00000000000..33ca7f62272 Binary files /dev/null and b/docs/images/incubator-logo.png differ diff --git a/docs/images/ld-library-path-eclipse-linux.png b/docs/images/ld-library-path-eclipse-linux.png new file mode 100644 index 00000000000..b6fbbd901c3 Binary files /dev/null and b/docs/images/ld-library-path-eclipse-linux.png differ diff --git a/docs/images/loading.gif b/docs/images/loading.gif new file mode 100644 index 00000000000..06f47affd81 Binary files /dev/null and b/docs/images/loading.gif differ diff --git a/docs/images/local_blobstore.png b/docs/images/local_blobstore.png new file mode 100644 index 
00000000000..ff8001e0e3c Binary files /dev/null and b/docs/images/local_blobstore.png differ diff --git a/docs/images/logo.png b/docs/images/logo.png new file mode 100644 index 00000000000..570276e91be Binary files /dev/null and b/docs/images/logo.png differ diff --git a/docs/images/logos/aeris.jpg b/docs/images/logos/aeris.jpg new file mode 100644 index 00000000000..adc2e18012d Binary files /dev/null and b/docs/images/logos/aeris.jpg differ diff --git a/docs/images/logos/alibaba.jpg b/docs/images/logos/alibaba.jpg new file mode 100644 index 00000000000..658a003393d Binary files /dev/null and b/docs/images/logos/alibaba.jpg differ diff --git a/docs/images/logos/bai.jpg b/docs/images/logos/bai.jpg new file mode 100644 index 00000000000..1bde805be37 Binary files /dev/null and b/docs/images/logos/bai.jpg differ diff --git a/docs/images/logos/cerner.jpg b/docs/images/logos/cerner.jpg new file mode 100644 index 00000000000..9a18cb6036c Binary files /dev/null and b/docs/images/logos/cerner.jpg differ diff --git a/docs/images/logos/flipboard.jpg b/docs/images/logos/flipboard.jpg new file mode 100644 index 00000000000..4d1eac11c68 Binary files /dev/null and b/docs/images/logos/flipboard.jpg differ diff --git a/docs/images/logos/fullcontact.jpg b/docs/images/logos/fullcontact.jpg new file mode 100644 index 00000000000..cc216103a95 Binary files /dev/null and b/docs/images/logos/fullcontact.jpg differ diff --git a/docs/images/logos/groupon.jpg b/docs/images/logos/groupon.jpg new file mode 100644 index 00000000000..97ae2c5c123 Binary files /dev/null and b/docs/images/logos/groupon.jpg differ diff --git a/docs/images/logos/health-market-science.jpg b/docs/images/logos/health-market-science.jpg new file mode 100644 index 00000000000..06ce60834c9 Binary files /dev/null and b/docs/images/logos/health-market-science.jpg differ diff --git a/docs/images/logos/images.png b/docs/images/logos/images.png new file mode 100644 index 00000000000..801cfce5d05 Binary files /dev/null and b/docs/images/logos/images.png differ diff --git a/docs/images/logos/infochimp.jpg b/docs/images/logos/infochimp.jpg new file mode 100644 index 00000000000..9b6e89f26b8 Binary files /dev/null and b/docs/images/logos/infochimp.jpg differ diff --git a/docs/images/logos/klout.jpg b/docs/images/logos/klout.jpg new file mode 100644 index 00000000000..69cdd3db233 Binary files /dev/null and b/docs/images/logos/klout.jpg differ diff --git a/docs/images/logos/loggly.jpg b/docs/images/logos/loggly.jpg new file mode 100644 index 00000000000..3f0eb81770a Binary files /dev/null and b/docs/images/logos/loggly.jpg differ diff --git a/docs/images/logos/ooyala.jpg b/docs/images/logos/ooyala.jpg new file mode 100644 index 00000000000..6a9480f702f Binary files /dev/null and b/docs/images/logos/ooyala.jpg differ diff --git a/docs/images/logos/parc.png b/docs/images/logos/parc.png new file mode 100644 index 00000000000..13a591ea607 Binary files /dev/null and b/docs/images/logos/parc.png differ diff --git a/docs/images/logos/premise.jpg b/docs/images/logos/premise.jpg new file mode 100644 index 00000000000..13f87608172 Binary files /dev/null and b/docs/images/logos/premise.jpg differ diff --git a/docs/images/logos/qiy.jpg b/docs/images/logos/qiy.jpg new file mode 100644 index 00000000000..ad7dce470f2 Binary files /dev/null and b/docs/images/logos/qiy.jpg differ diff --git a/docs/images/logos/quicklizard.jpg b/docs/images/logos/quicklizard.jpg new file mode 100644 index 00000000000..65328c6f571 Binary files /dev/null and b/docs/images/logos/quicklizard.jpg 
differ diff --git a/docs/images/logos/rocketfuel.jpg b/docs/images/logos/rocketfuel.jpg new file mode 100644 index 00000000000..b169a87b15b Binary files /dev/null and b/docs/images/logos/rocketfuel.jpg differ diff --git a/docs/images/logos/rubicon.jpg b/docs/images/logos/rubicon.jpg new file mode 100644 index 00000000000..da01e0adca2 Binary files /dev/null and b/docs/images/logos/rubicon.jpg differ diff --git a/docs/images/logos/spider.jpg b/docs/images/logos/spider.jpg new file mode 100644 index 00000000000..69aab3eee16 Binary files /dev/null and b/docs/images/logos/spider.jpg differ diff --git a/docs/images/logos/spotify.jpg b/docs/images/logos/spotify.jpg new file mode 100644 index 00000000000..8d6253ff36d Binary files /dev/null and b/docs/images/logos/spotify.jpg differ diff --git a/docs/images/logos/taobao.jpg b/docs/images/logos/taobao.jpg new file mode 100644 index 00000000000..54272afe089 Binary files /dev/null and b/docs/images/logos/taobao.jpg differ diff --git a/docs/images/logos/the-weather-channel.jpg b/docs/images/logos/the-weather-channel.jpg new file mode 100644 index 00000000000..f9d68f4f737 Binary files /dev/null and b/docs/images/logos/the-weather-channel.jpg differ diff --git a/docs/images/logos/twitter.jpg b/docs/images/logos/twitter.jpg new file mode 100644 index 00000000000..fb50bdb882c Binary files /dev/null and b/docs/images/logos/twitter.jpg differ diff --git a/docs/images/logos/verisign.jpg b/docs/images/logos/verisign.jpg new file mode 100644 index 00000000000..2a0dc7000be Binary files /dev/null and b/docs/images/logos/verisign.jpg differ diff --git a/docs/images/logos/webmd.jpg b/docs/images/logos/webmd.jpg new file mode 100644 index 00000000000..cec11ed49af Binary files /dev/null and b/docs/images/logos/webmd.jpg differ diff --git a/docs/images/logos/wego.jpg b/docs/images/logos/wego.jpg new file mode 100644 index 00000000000..27c62c4ff50 Binary files /dev/null and b/docs/images/logos/wego.jpg differ diff --git a/docs/images/logos/yahoo-japan.jpg b/docs/images/logos/yahoo-japan.jpg new file mode 100644 index 00000000000..ef213a71552 Binary files /dev/null and b/docs/images/logos/yahoo-japan.jpg differ diff --git a/docs/images/logos/yahoo.png b/docs/images/logos/yahoo.png new file mode 100755 index 00000000000..659e6defc07 Binary files /dev/null and b/docs/images/logos/yahoo.png differ diff --git a/docs/images/logos/yelp.jpg b/docs/images/logos/yelp.jpg new file mode 100644 index 00000000000..9e6b6e46178 Binary files /dev/null and b/docs/images/logos/yelp.jpg differ diff --git a/docs/images/mailinglist.png b/docs/images/mailinglist.png new file mode 100644 index 00000000000..ef44068c48b Binary files /dev/null and b/docs/images/mailinglist.png differ diff --git a/docs/images/nimbus_ha_blobstore.png b/docs/images/nimbus_ha_blobstore.png new file mode 100644 index 00000000000..26e8c2a5424 Binary files /dev/null and b/docs/images/nimbus_ha_blobstore.png differ diff --git a/docs/images/nimbus_ha_leader_election_and_failover.png b/docs/images/nimbus_ha_leader_election_and_failover.png new file mode 100644 index 00000000000..60cc1b74cb8 Binary files /dev/null and b/docs/images/nimbus_ha_leader_election_and_failover.png differ diff --git a/docs/images/nimbus_ha_topology_submission.png b/docs/images/nimbus_ha_topology_submission.png new file mode 100644 index 00000000000..7707e5ae4b0 Binary files /dev/null and b/docs/images/nimbus_ha_topology_submission.png differ diff --git a/docs/images/ras_new_strategy_network_cdf_random.png 
b/docs/images/ras_new_strategy_network_cdf_random.png new file mode 100644 index 00000000000..8b47f36367a Binary files /dev/null and b/docs/images/ras_new_strategy_network_cdf_random.png differ diff --git a/docs/images/ras_new_strategy_network_metric_cdf_yahoo_topologies.png b/docs/images/ras_new_strategy_network_metric_cdf_yahoo_topologies.png new file mode 100644 index 00000000000..6e5a04d20a0 Binary files /dev/null and b/docs/images/ras_new_strategy_network_metric_cdf_yahoo_topologies.png differ diff --git a/docs/images/ras_new_strategy_network_metric_improvement_random.png b/docs/images/ras_new_strategy_network_metric_improvement_random.png new file mode 100644 index 00000000000..3fd402917bd Binary files /dev/null and b/docs/images/ras_new_strategy_network_metric_improvement_random.png differ diff --git a/docs/images/ras_new_strategy_network_metric_random.png b/docs/images/ras_new_strategy_network_metric_random.png new file mode 100644 index 00000000000..11d95d95e83 Binary files /dev/null and b/docs/images/ras_new_strategy_network_metric_random.png differ diff --git a/docs/images/ras_new_strategy_network_metric_yahoo_topologies.png b/docs/images/ras_new_strategy_network_metric_yahoo_topologies.png new file mode 100644 index 00000000000..60ed7c1e67b Binary files /dev/null and b/docs/images/ras_new_strategy_network_metric_yahoo_topologies.png differ diff --git a/docs/images/ras_new_strategy_runtime_random.png b/docs/images/ras_new_strategy_runtime_random.png new file mode 100644 index 00000000000..0ba1c7374e7 Binary files /dev/null and b/docs/images/ras_new_strategy_runtime_random.png differ diff --git a/docs/images/ras_new_strategy_runtime_yahoo.png b/docs/images/ras_new_strategy_runtime_yahoo.png new file mode 100644 index 00000000000..0a510892da1 Binary files /dev/null and b/docs/images/ras_new_strategy_runtime_yahoo.png differ diff --git a/docs/images/relationships-worker-processes-executors-tasks.png b/docs/images/relationships-worker-processes-executors-tasks.png new file mode 100644 index 00000000000..ef6f3fd45cd Binary files /dev/null and b/docs/images/relationships-worker-processes-executors-tasks.png differ diff --git a/docs/images/resource_aware_scheduler_default_eviction_strategy.png b/docs/images/resource_aware_scheduler_default_eviction_strategy.png new file mode 100644 index 00000000000..9a9917fc679 Binary files /dev/null and b/docs/images/resource_aware_scheduler_default_eviction_strategy.png differ diff --git a/docs/images/resource_aware_scheduler_default_eviction_strategy.svg b/docs/images/resource_aware_scheduler_default_eviction_strategy.svg new file mode 100644 index 00000000000..406c807acb1 --- /dev/null +++ b/docs/images/resource_aware_scheduler_default_eviction_strategy.svg @@ -0,0 +1,3 @@
[drawio SVG markup elided; recoverable text content: flowchart of the default eviction strategy -- "Schedule topology ti"; "Can topology ti from user ui be scheduled?" (Yes/No); "How much resources does topology ti need outside of user ui's resource guarantee: ui's allocated resource above guarantee + additional resources needed above guarantee to schedule ti" (>0 / 0); "Determine if topologies should be evicted"; "Find user uj, the user that has the most resources above his or her guarantee"; "Find a topology tj with the lowest priority that has been scheduled and belongs to user uj" (None?); "Determine if, when tj is evicted, we can schedule ti" (Yes/No); "Recalculate how much resources users have above their guarantee, taking into account that tj is evicted; add tj to list L, a list of topologies to potentially evict"; "Total cluster resources (+/- room for fragmentation) < Total resource guarantees"; "Won't be fair to allocate user ui more resources"; "Evict topologies in list L"; "Topology cannot be scheduled."]
\ No newline at end of file diff --git a/docs/images/search-a-topology.png b/docs/images/search-a-topology.png new file mode 100644 index 00000000000..8d6153c96f0 Binary files /dev/null and b/docs/images/search-a-topology.png differ diff --git a/docs/images/search-for-a-single-worker-log.png b/docs/images/search-for-a-single-worker-log.png new file mode 100644 index 00000000000..8c6f423cb8e Binary files /dev/null and b/docs/images/search-for-a-single-worker-log.png differ diff --git a/docs/images/security.png b/docs/images/security.png new file mode 100644 index 00000000000..299e26d115c Binary files /dev/null and b/docs/images/security.png differ diff --git a/docs/images/security.svg b/docs/images/security.svg new file mode 100644 index 00000000000..526aa63d263 --- /dev/null +++ b/docs/images/security.svg @@ -0,0 +1,1779 @@
[SVG markup elided; recoverable text content: security architecture diagram with components DRPC, ZooKeeper, Nimbus, Supervisor, Workers, User, UI, User Processes, Log Viewer, and numbered connections keyed to the legend: 1. Kerberos, 2. SharedSecret, 3. HTTPAuth(Bouncer), 4. File System Permissions]
diff --git a/docs/images/spout-vs-state.png b/docs/images/spout-vs-state.png new file mode 100644 index 00000000000..b6b06b382e7 Binary files /dev/null and b/docs/images/spout-vs-state.png differ diff --git a/docs/images/spout.png b/docs/images/spout.png new file mode 100644 index 00000000000..cab98128041 Binary files /dev/null and b/docs/images/spout.png differ diff --git a/docs/images/spout.svg b/docs/images/spout.svg new file mode 100644 index 00000000000..01059577445 --- /dev/null +++ b/docs/images/spout.svg @@ -0,0 +1,833 @@
[SVG markup elided; recoverable text content: spout diagram whose only text labels are repeated "Tuple" tags]
diff --git a/docs/images/storm-cluster.png b/docs/images/storm-cluster.png new file mode 100644 index 00000000000..df2ddb8c164 Binary files /dev/null and b/docs/images/storm-cluster.png differ diff --git a/docs/images/storm-flow.png b/docs/images/storm-flow.png new file mode 100644 index 00000000000..45df814b662 Binary files /dev/null and b/docs/images/storm-flow.png differ diff --git a/docs/images/storm-sql-internal-example-exported-from-drawio.xml b/docs/images/storm-sql-internal-example-exported-from-drawio.xml new file mode 100644 index 00000000000..cd0247cc843 --- /dev/null +++ b/docs/images/storm-sql-internal-example-exported-from-drawio.xml @@ -0,0 +1,2 @@ + +7Vtbb6M4FP41kXZXagThkvCYpulMpfQyIaPZPo0ccIinBrPgTJL59WuDnXLLlM02EKkgteBjc3z5Pp9zOHJ62sTffYpAuL4nLsS9geLuetpNbzAYmjr7zwX7VGAM1VTgRchNRRmBjX5BIVSEdINcGOcaUkIwRWFe6JAggA7NyUAUkW2+2YrgfK8h8GBJYDsAl6XfkEvXQqqa1mvFZ4i8teh6NBimFT6QjcVM4jVwyTYj0qY9bRIRQtMnfzeBmK+dXJf0vdsjtYeBRTCgdV7QNRMYrrEcuStXBZZ1JVX8BHgjZitGSvdy+ts1otAOgcPLW4ZwT7teUx+zksoeMVhCfA2cFy8im8CdEEyi5EVtlVysyYoE9Bb4CHMCTIiPHNaDDYKY3e5t0UDgrhqinFGkJBeTA4y8gMkcNl8Y8YYIY9kwIAFTcO2CeA1dMTheeAKUNQ4SyUDhUjFnGFG4O7qS6gEfxmtIfEijPWsiSa0ISAWnNVHcZghiCtk6ww1dNgSClN5B9Stu7EFAVxNGdfARYWT7nQIUcA03yvvAqusXBWsFqiamYmVz8Jr/bIisuIoTDMZ8mZRw91rJnjx+7/f7UtEyksKy5O7Bns4XTMvdw+KR3Wbj+afp98f5zXRuc+Sns+kkqWb9s8kpXx/uFt+f5neTKSv8xf6+fB0/LO4Wz+xxzN9YPC7GM3a/nT/es9tB07fP0/n0DQVsUMDnnBWDVQxFjpita7oexyZyyXuhBs/ldsFwRd+J5nmWHyidobmmVtBcVc5B87Lxsr/MEsAAhT6bUlwCkE2d5jGLaUReYMGIVNiVku3hC4mYsx+LCh+5Lu/muookCR8SuGpi1ySV3oEYh0hFMIP5ubIB1CqYMTgHMbS3vRqLqEL+uMJwN+axHlsHGLji8cbBII7ZguaokmXFGYGAbimsfBOGzDIbFassZRHEgKKfefVVSy96eCIo8RcCZc0soKxYfSOvJCabyIHivWxEWVClFwmjFohAQeRBWlKUcOEw8dPooR91jy76Wekdudm4EhaAu0dhBEoOUuqJQxBI2QIsMf8qCDJuJ1ufESfdH3E9eQtymiOS1u1/8vU2ud6Kx/gOGTnQcUpGltUsR4Zu8Il4EXARzKmHwFya5js5rAJjjQqzNKjYMOY5zJLRKO9uEU7bdqRrmXRqq6wzG2XdU0R+QIci0pm7C2Beu/Zu2LyfvScuWu076rVPPc3QZGzYCvmsEq48rrZFUeCThzoDK9wh+jcX9w1RepY1ARtZpooXn4WCH5DSvYATbChhIhLRNfFIAPCMkFB+SDT9wcDWIYnLfx8QpyH374KXMvq1vyhOT2YpHZK1kTRqIGm2hqTaIVkbSbMGksPWkOwyLOfJsJjFtMjpGRZz1F6GRdW7nV57p6t1jLba3l5vNm1hUxL5dkg29ONF8S6Ao1VlFG86I7hcVUfxQxbGw3fK4w8HF5QvU83OitS3IsM6VsRqzYo0mwz4qMnPy7Mg7SY/1VFnQuqbEKuGCcmcP2vahFiNmpCPnMm+PDPSbiQiO2o0BkbBS8e89pnXdiJbplYy3JsRj5884tPCnBUF3LtDTuc55KRbumTCXoJVtkpWU6ecBuUk3NN6H3fMaJ4Zw8LByCpeHM5Anp8YVQecuuzsfwlSq1Oqo7dSqnVzs5ZeUlRM854xOzs4fiQkjTJOPR++3CDswqgfwK1NIwj8P/r9/p+VR8bTM9nBMg6TsnJOUX+VZAMuZDA+CC9kJJRcE0yPD6Y69CwYko9+JN4a5rdyZb6j6uTzmc7El/NsydcE/6wnIVs0b99FBQ1FBVbRXZhqve+IE6ICVnz9OWDqJF5/U6lN/wU= \ No newline at end of file diff --git
a/docs/images/storm-sql-internal-example.png b/docs/images/storm-sql-internal-example.png new file mode 100644 index 00000000000..c02d47ebc9c Binary files /dev/null and b/docs/images/storm-sql-internal-example.png differ diff --git a/docs/images/storm-sql-internal-workflow-exported-from-drawio.xml b/docs/images/storm-sql-internal-workflow-exported-from-drawio.xml new file mode 100644 index 00000000000..a583c930df3 --- /dev/null +++ b/docs/images/storm-sql-internal-workflow-exported-from-drawio.xml @@ -0,0 +1,2 @@ + +7Vtbc6M2FP41nknbmWSEQGAe4+RuJ20zfWgfmjPNBiEG/xXr8/N/l/e5fL9e+Hb6XwIFodeHBFKZX3JCYNhza7HlumXLLTJJeQPnoJ0nqM4CTcqm/Ru2sq9gP4gSLt/TgnVWLQPkxnkwjiPJRLebIOqz8CJfZx2A+ECZX5IKQnDTMZLM2FzX+IoSyh1HHnnssmm9TrIVgspLn+9aa/5ygc7HrqTOCYfXPs3QZGzYCvmsEq48rrZFUeCThzoDK9wh+jcX9w1RepY1ARtZpooXn4WCH5DSvYATbChhIhLRNfFIAPCMkFB+SDT9wcDWIYnLfx8QpyH374KXMvq1vyhOT2YpHZK1kTRqIGm2hqTaIVkbSbMGksPWkOwyLOfJsJjFtMjpGRZz1F6GRdW7nV57p6t1jLba3l5vNm1hUxL5dkg29ONF8S6Ao1VlFG86I7hcVUfxQxbGw3fK4w8HF5QvU83OitS3IsM6VsRqzYo0mwz4qMnPy7Mg7SY/1VFnQuqbEKuGCcmcP2vahFiNmpCPnMm+PDPSbiQiO2o0BkbBS8e89pnXdiJbplYy3JsRj5884tPCnBUF3LtDTuc55KRbumTCXoJVtkpWU6ecBuUk3NN6H3fMaJ4Zw8LByCpeHM5Anp8YVQecuuzsfwlSq1Oqo7dSqnVzs5ZeUlRM854xOzs4fiQkjTJOPR++3CDswqgfwK1NIwj8P/r9/p+VR8bTM9nBMg6TsnJOUX+VZAMuZDA+CC9kJJRcE0yPD6Y69CwYko9+JN4a5rdyZb6j6uTzmc7El/NsydcE/6wnIVs0b99FBQ1FBVbRXZhqve+IE6ICVnz9OWDqJF5/U6lN/wU= \ No newline at end of file diff --git a/docs/images/storm-sql-internal-workflow.png b/docs/images/storm-sql-internal-workflow.png new file mode 100644 index 00000000000..abcb518ffcc Binary files /dev/null and b/docs/images/storm-sql-internal-workflow.png differ diff --git a/docs/images/storm.svg b/docs/images/storm.svg new file mode 100644 index 00000000000..2f1dae04259 --- /dev/null +++ b/docs/images/storm.svg @@ -0,0 +1,1326 @@
[SVG markup elided; recoverable text content: storm topology diagram whose only text labels are repeated "Tuple" tags]
diff --git a/docs/images/storm_header.png b/docs/images/storm_header.png new
file mode 100644 index 00000000000..3a4c58ab982 Binary files /dev/null and b/docs/images/storm_header.png differ diff --git a/docs/images/storm_logo_tagline_color.png b/docs/images/storm_logo_tagline_color.png new file mode 100644 index 00000000000..56233eadcaf Binary files /dev/null and b/docs/images/storm_logo_tagline_color.png differ diff --git a/docs/images/storm_ui.png b/docs/images/storm_ui.png new file mode 100644 index 00000000000..45aae41213f Binary files /dev/null and b/docs/images/storm_ui.png differ diff --git a/docs/images/supervisor_page.png b/docs/images/supervisor_page.png new file mode 100644 index 00000000000..5133681251b Binary files /dev/null and b/docs/images/supervisor_page.png differ diff --git a/docs/images/top_bg.gif b/docs/images/top_bg.gif new file mode 100644 index 00000000000..fb13d3f97c7 Binary files /dev/null and b/docs/images/top_bg.gif differ diff --git a/docs/images/topology-tasks.png b/docs/images/topology-tasks.png new file mode 100644 index 00000000000..0affaba07a0 Binary files /dev/null and b/docs/images/topology-tasks.png differ diff --git a/docs/images/topology.png b/docs/images/topology.png new file mode 100644 index 00000000000..a45c25c0fc9 Binary files /dev/null and b/docs/images/topology.png differ diff --git a/docs/images/topology.svg b/docs/images/topology.svg new file mode 100644 index 00000000000..0f4f58f619e --- /dev/null +++ b/docs/images/topology.svg @@ -0,0 +1,1044 @@
[SVG markup elided; recoverable text content: topology diagram whose only text labels are repeated "Tuple" tags]
diff --git a/docs/images/topology_dark.png b/docs/images/topology_dark.png new file mode 100644 index 00000000000..4d4e78710b3 Binary files /dev/null and b/docs/images/topology_dark.png differ diff --git a/docs/images/topology_dark.svg b/docs/images/topology_dark.svg new file mode 100644 index 00000000000..986de4356f3 --- /dev/null +++ b/docs/images/topology_dark.svg @@ -0,0 +1,1101 @@
[SVG markup elided; recoverable text content: dark variant of the topology diagram whose only text labels are repeated "Tuple" tags]
diff --git a/docs/images/transactional-batches.png b/docs/images/transactional-batches.png new file mode 100644 index 00000000000..ab71509d064 Binary files /dev/null and b/docs/images/transactional-batches.png differ diff --git a/docs/images/transactional-commit-flow.png b/docs/images/transactional-commit-flow.png new file mode 100644 index 00000000000..844f19d6710 Binary files /dev/null and b/docs/images/transactional-commit-flow.png differ diff --git a/docs/images/transactional-design-2.png b/docs/images/transactional-design-2.png new file mode 100644 index 00000000000..abc83d99c0c Binary files /dev/null and b/docs/images/transactional-design-2.png differ diff --git a/docs/images/transactional-spout-structure.png b/docs/images/transactional-spout-structure.png new file mode 100644 index 00000000000..ecf7deffd52
Binary files /dev/null and b/docs/images/transactional-spout-structure.png differ diff --git a/docs/images/trident-to-storm1.png b/docs/images/trident-to-storm1.png new file mode 100644 index 00000000000..b022776e0ac Binary files /dev/null and b/docs/images/trident-to-storm1.png differ diff --git a/docs/images/trident-to-storm2.png b/docs/images/trident-to-storm2.png new file mode 100644 index 00000000000..6aa0fc522af Binary files /dev/null and b/docs/images/trident-to-storm2.png differ diff --git a/docs/images/tuple-dag.png b/docs/images/tuple-dag.png new file mode 100644 index 00000000000..34611d49782 Binary files /dev/null and b/docs/images/tuple-dag.png differ diff --git a/docs/images/tuple_tree.png b/docs/images/tuple_tree.png new file mode 100644 index 00000000000..b14f5580763 Binary files /dev/null and b/docs/images/tuple_tree.png differ diff --git a/docs/images/ui_topology_viz.png b/docs/images/ui_topology_viz.png new file mode 100644 index 00000000000..6152568ae18 Binary files /dev/null and b/docs/images/ui_topology_viz.png differ diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 00000000000..e4367e669d0 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,103 @@
---
title: Documentation
layout: documentation
documentation: true
---
### Basics of Storm

* [Javadoc](javadocs/index.html)
* [Tutorial](Tutorial.html)
* [Concepts](Concepts.html)
* [Scheduler](Storm-Scheduler.html)
* [Configuration](Configuration.html)
* [Guaranteeing message processing](Guaranteeing-message-processing.html)
* [Daemon Fault Tolerance](Daemon-Fault-Tolerance.html)
* [Command line client](Command-line-client.html)
* [REST API](STORM-UI-REST-API.html)
* [Understanding the parallelism of a Storm topology](Understanding-the-parallelism-of-a-Storm-topology.html)
* [FAQ](FAQ.html)

### Layers on top of Storm

#### Trident

Trident is an alternative interface to Storm. It provides exactly-once processing, "transactional" datastore persistence, and a set of common stream analytics operations.

* [Trident Tutorial](Trident-tutorial.html) -- basic concepts and walkthrough
* [Trident API Overview](Trident-API-Overview.html) -- operations for transforming and orchestrating data
* [Trident State](Trident-state.html) -- exactly-once processing and fast, persistent aggregation
* [Trident spouts](Trident-spouts.html) -- transactional and non-transactional data intake
* [Trident RAS API](Trident-RAS-API.html) -- using the Resource Aware Scheduler with Trident.

#### Streams API

The Streams API is another alternative interface to Storm. It provides a typed API for expressing streaming computations and supports functional-style operations.

NOTE: The Streams API is an `experimental` feature, and future work might break backward compatibility.
We flag this by annotating the relevant classes with the marker interface `@InterfaceStability.Unstable`.
* [Streams API](Stream-API.html)

#### Flux

* [Flux Data Driven Topology Builder](flux.html)

### Setup and Deploying

* [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html)
* [Local mode](Local-mode.html)
* [Troubleshooting](Troubleshooting.html)
* [Running topologies on a production cluster](Running-topologies-on-a-production-cluster.html)
* [Building Storm](Maven.html) with Maven
* [Setting up a Secure Cluster](SECURITY.html)
* [CGroup Enforcement](cgroups_in_storm.html)
* [Pacemaker reduces load on zookeeper for large clusters](Pacemaker.html)
* [Resource Aware Scheduler](Resource_Aware_Scheduler_overview.html)
* [Generic Resources](Generic-resources.html)
* [Daemon Metrics/Monitoring](ClusterMetrics.html)
* [Windows users guide](windows-users-guide.html)
* [Classpath handling](Classpath-handling.html)

### Intermediate

* [Serialization](Serialization.html)
* [Common patterns](Common-patterns.html)
* [DSLs and multilang adapters](DSLs-and-multilang-adapters.html)
* [Using non-JVM languages with Storm](Using-non-JVM-languages-with-Storm.html)
* [Distributed RPC](Distributed-RPC.html)
* [Hooks](Hooks.html)
* [Metrics (Deprecated)](Metrics.html)
* [Metrics V2](metrics_v2.html)
* [State Checkpointing](State-checkpointing.html)
* [Windowing](Windowing.html)
* [Joining Streams](Joins.html)
* [Blobstore (Distcache)](distcache-blobstore.html)

### Debugging
* [Dynamic Log Level Settings](dynamic-log-level-settings.html)
* [Searching Worker Logs](Logs.html)
* [Worker Profiling](dynamic-worker-profiling.html)
* [Event Logging](Eventlogging.html)

### Integration With External Systems, and Other Libraries
* [Apache Kafka Integration](storm-kafka-client.html)
* [Apache HBase Integration](storm-hbase.html)
* [Apache HDFS Integration](storm-hdfs.html)
* [JDBC Integration](storm-jdbc.html)
* [JMS Integration](storm-jms.html)
* [Redis Integration](storm-redis.html)

#### Container, Resource Management System Integration

* [YARN Integration](https://github.com/yahoo/storm-yarn)
* [Mesos Integration](https://github.com/mesos/storm)
* [Docker Integration](https://hub.docker.com/_/storm/)
* [Kubernetes Integration](https://github.com/kubernetes/examples/tree/master/staging/storm)

### Advanced

* [Defining a non-JVM language DSL for Storm](Defining-a-non-jvm-language-dsl-for-storm.html)
* [Multilang protocol](Multilang-protocol.html) (how to provide support for another language)
* [Implementation docs](Implementation-docs.html)
* [Storm Metricstore](storm-metricstore.html)

diff --git a/docs/metrics_v2.md b/docs/metrics_v2.md new file mode 100644 index 00000000000..d65635c14a6 --- /dev/null +++ b/docs/metrics_v2.md @@ -0,0 +1,177 @@
---
title: Metrics Reporting API v2
layout: documentation
documentation: true
---
Apache Storm version 1.2 introduced a new metrics system for reporting
internal statistics (e.g. acked, failed, emitted, transferred, and queue metrics) as well as a
new API for user-defined metrics.

The new metrics system is based on [Dropwizard Metrics](http://metrics.dropwizard.io).
## User Defined Metrics
To allow users to define custom metrics, the following methods have been added to the `TopologyContext`
class, an instance of which is passed to a spout's `open()` method and a bolt's `prepare()` method:

    public Timer registerTimer(String name)

    public Histogram registerHistogram(String name)

    public Meter registerMeter(String name)

    public Counter registerCounter(String name)

    public Gauge registerGauge(String name, Gauge gauge)

API documentation: [Timer](http://metrics.dropwizard.io/4.0.0/apidocs/com/codahale/metrics/Timer.html),
[Histogram](http://metrics.dropwizard.io/4.0.0/apidocs/com/codahale/metrics/Histogram.html),
[Meter](http://metrics.dropwizard.io/4.0.0/apidocs/com/codahale/metrics/Meter.html),
[Counter](http://metrics.dropwizard.io/4.0.0/apidocs/com/codahale/metrics/Counter.html),
[Gauge](http://metrics.dropwizard.io/4.0.0/apidocs/com/codahale/metrics/Gauge.html)

Each of these methods takes a `name` parameter that acts as an identifier. When metrics are
registered, Storm will add additional information such as hostname, port, topology ID, etc. to form a unique metric
identifier. For example, if we register a metric named `myCounter` as follows:

```java
    Counter myCounter = topologyContext.registerCounter("myCounter");
```
the resulting name sent to metrics reporters will expand to:

```
    storm.topology.{topology ID}.{hostname}.{component ID}.{task ID}.{worker port}-myCounter
```

The additional information allows for the unique identification of metrics for component instances across the cluster.

*Important Note:* In order to ensure metric names can be reliably parsed, any `.` characters in name components will
be replaced with an underscore (`_`) character. For example, the hostname `storm.example.com` will appear as
`storm_example_com` in the metric name. This character substitution is *not* applied to the user-supplied `name` parameter.

### Example: Tuple Counter Bolt
The following example is a simple bolt implementation that will report the running total of tuples received by the bolt:

```java
public class TupleCountingBolt extends BaseRichBolt {
    private Counter tupleCounter;
    @Override
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        this.tupleCounter = context.registerCounter("tupleCount");
    }

    @Override
    public void execute(Tuple input) {
        this.tupleCounter.inc();
    }
}
```

## Metric Reporter Configuration

For metrics to be useful they must be *reported*; in other words, sent somewhere they can be consumed and analyzed.
That can be as simple as writing them to a log file, sending them to a time series database, or exposing them via JMX.

The following metric reporters are supported:

 * Console Reporter (`org.apache.storm.metrics2.reporters.ConsoleStormReporter`):
   Reports metrics to `System.out`.
 * CSV Reporter (`org.apache.storm.metrics2.reporters.CsvStormReporter`):
   Reports metrics to a CSV file.
 * Graphite Reporter (`org.apache.storm.metrics2.reporters.GraphiteStormReporter`):
   Reports metrics to a [Graphite](https://graphiteapp.org) server.
 * JMX Reporter (`org.apache.storm.metrics2.reporters.JmxStormReporter`):
   Exposes metrics via JMX.

Custom metrics reporters can be created by implementing the `org.apache.storm.metrics2.reporters.StormReporter` interface
or extending the `org.apache.storm.metrics2.reporters.ScheduledStormReporter` class.
By default, Storm will collect metrics but not "report" or
send the collected metrics anywhere. To enable metrics reporting, add a `topology.metrics.reporters` section to `storm.yaml`
or to the topology configuration, and configure one or more reporters.

The following example configuration sets up two reporters: a Graphite Reporter and a Console Reporter:

```yaml
topology.metrics.reporters:
  # Graphite Reporter
  - class: "org.apache.storm.metrics2.reporters.GraphiteStormReporter"
    report.period: 60
    report.period.units: "SECONDS"
    graphite.host: "localhost"
    graphite.port: 2003

  # Console Reporter
  - class: "org.apache.storm.metrics2.reporters.ConsoleStormReporter"
    report.period: 10
    report.period.units: "SECONDS"
    filter:
        class: "org.apache.storm.metrics2.filters.RegexFilter"
        expression: ".*my_component.*emitted.*"
```

Each reporter section begins with a `class` parameter representing the fully-qualified class name of the reporter
implementation.

Many reporter implementations are *scheduled*, meaning they report metrics at regular intervals. The reporting interval
is determined by the `report.period` and `report.period.units` parameters.

Reporters can also be configured with an optional filter that determines which metrics get reported. Storm includes the
`org.apache.storm.metrics2.filters.RegexFilter` filter, which uses a regular expression to determine which metrics get
reported. Custom filters can be created by implementing the `org.apache.storm.metrics2.filters.StormMetricsFilter`
interface:

```java
public interface StormMetricsFilter extends MetricFilter {

    /**
     * Called after the filter is instantiated.
     * @param config A map of the properties from the 'filter' section of the reporter configuration.
     */
    void prepare(Map config);

    /**
     * Returns true if the given metric should be reported.
     */
    boolean matches(String name, Metric metric);

}
```

V2 metrics can be reported with a long name (such as `storm.topology.mytopologyname-17-1595349167.hostname.__system.-1.6700-memory.pools.Code-Cache.max`) or with a short
name and dimensions (such as `memory.pools.Code-Cache.max` with a task ID dimension of -1 and a component ID dimension of `__system`), if the reporter supports this. Each reporter defaults
to using the long metric name, but can report the short name by setting `report.dimensions.enabled` to `true` for the reporter.

## Backwards Compatibility Notes

1. V2 metrics can also be reported to the Metrics Consumers registered with `topology.metrics.consumer.register` by enabling the `topology.enable.v2.metrics.tick` configuration.
The rate at which they will be reported to Metrics Consumers is controlled by `topology.v2.metrics.tick.interval.seconds`, defaulting to every 60 seconds.

2. Starting from Storm 2.3, the config `storm.metrics.reporters` is deprecated in favor of `topology.metrics.reporters`.

3. Starting from Storm 2.3, the `daemons` section is removed from `topology.metrics.reporters` (or `storm.metrics.reporters`).
Before Storm 2.3, a `daemons` section was required in the reporter conf to determine which daemons the reporters would apply to.
However, the reporters configured with `topology.metrics.reporters` (or `storm.metrics.reporters`) actually only apply to workers; they were never really used in daemons like nimbus and the supervisors.
For daemon metrics, please refer to [Cluster Metrics](ClusterMetrics.html).

4. 
**Backwards Compatibility Breakage**: starting from Storm 2.3, the following configs no longer apply to `topology.metrics.reporters`:
    ```yaml
    storm.daemon.metrics.reporter.plugin.locale
    storm.daemon.metrics.reporter.plugin.rate.unit
    storm.daemon.metrics.reporter.plugin.duration.unit
    ```

    They only apply to daemon metric reporters configured via `storm.daemon.metrics.reporter.plugins` for storm daemons.
    The corresponding configs for `topology.metrics.reporters` can be configured in the reporter conf with `locale`, `rate.unit`, and `duration.unit` respectively, for example:
    ```yaml
    topology.metrics.reporters:
      # Console Reporter
      - class: "org.apache.storm.metrics2.reporters.ConsoleStormReporter"
        report.period: 10
        report.period.units: "SECONDS"
        locale: "en-US"
        rate.unit: "SECONDS"
        duration.unit: "SECONDS"
    ```
    Default values will be used if they are not set or are set to `null`.

diff --git a/docs/nimbus-ha-design.md b/docs/nimbus-ha-design.md new file mode 100644 index 00000000000..ae1a9366327 --- /dev/null +++ b/docs/nimbus-ha-design.md @@ -0,0 +1,222 @@
---
title: Highly Available Nimbus Design
layout: documentation
documentation: true
---

## Problem Statement
Currently the Storm master, aka nimbus, is a process that runs on a single machine under supervision. In most cases a
nimbus failure is transient and it is restarted by its supervising process. However, sometimes when disks fail or network
partitions occur, nimbus goes down. Under these circumstances the topologies run normally but no new topologies can be
submitted, no existing topologies can be killed/deactivated/activated, and if a supervisor node fails the
reassignments are not performed, resulting in performance degradation or topology failures. With this project we intend
to resolve this problem by running nimbus in a primary/backup mode to guarantee that even if a nimbus server fails, one
of the backups will take over.

## Requirements
* Increase overall availability of nimbus.
* Allow nimbus hosts to leave and join the cluster at will, at any time. A newly joined host should catch up and join
the list of potential leaders automatically.
* No topology resubmissions required in case of nimbus failovers.
* No active topology should ever be lost.

## Leader Election
The nimbus server will use the following interface:

```java
public interface ILeaderElector {
    /**
     * Queue up for the leadership lock. The call returns immediately and the caller
     * must check isLeader() to perform any leadership action.
     */
    void addToLeaderLockQueue();

    /**
     * Removes the caller from the leader lock queue. If the caller is the leader,
     * this also releases the lock.
     */
    void removeFromLeaderLockQueue();

    /**
     * @return true if the caller currently has the leader lock.
     */
    boolean isLeader();

    /**
     * @return the current leader's address; throws an exception if no one has the lock.
     */
    InetSocketAddress getLeaderAddress();

    /**
     * @return list of current nimbus addresses, including the leader.
     */
    List<String> getAllNimbusAddresses();
}
```
On startup, nimbus will check if it has code for all active topologies available locally. Once it gets to this state it
will call the addToLeaderLockQueue() function. When a nimbus is notified to become a leader, it will check if it has all the
code locally before assuming the leadership role.
If any active topology code is missing, the node will not accept the
leadership role; instead it will release the lock and wait until it has all the code before re-queueing for the leader lock.

The first implementation will be zookeeper based. If the zookeeper connection is lost or reset, resulting in loss of the lock
or the spot in the queue, the implementation will take care of updating the state such that isLeader() reflects the
current status. The leader-like actions must finish in less than minimumOf(connectionTimeout, SessionTimeout) to ensure
the lock was held by nimbus for the entire duration of the action. (It is not yet decided whether we want to just state this expectation
and ensure that zookeeper configurations are set high enough, which will result in higher failover times, or whether we actually want to
create some sort of rollback mechanism for all actions; the second option needs a lot of code.) If a nimbus that is not the
leader receives a request that only a leader can perform, it will throw a RuntimeException.

The following steps describe a nimbus failover scenario:
* Let's say we have 4 topologies running with 3 nimbus nodes and code-replication-factor = 2. We assume that the
invariant "The leader nimbus has code for all topologies locally" holds true at the beginning. nonleader-1 has code for
the first 2 topologies and nonleader-2 has code for the other 2 topologies.
* The leader nimbus dies due to a hard disk failure, so no recovery is possible.
* nonleader-1 gets a zookeeper notification to indicate it is now the new leader. Before accepting the leadership it
checks if it has code available for all 4 topologies (these are the topologies under /storm/storms/). It realizes it only has
code for 2 topologies, so it relinquishes the lock and looks under /storm/code-distributor/topologyId to find out from
where it can download the code/metafile for the missing topologies. It finds entries for the dead leader nimbus and
nonleader-2, and will try downloading from both as part of its retry mechanism.
* nonleader-2's code sync thread also realizes that it is missing code for 2 topologies, and follows the same process
described in the previous step to download the code for the missing topologies.
* Eventually at least one of the nimbuses will have all the code locally and will accept leadership.

The following sequence diagram describes how leader election and failover would work with multiple components.

![Nimbus Fail Over](images/nimbus_ha_leader_election_and_failover.png)

## Nimbus State Store

Currently nimbus stores 2 kinds of data:
* Meta information like supervisor info and assignment info, which is stored in zookeeper.
* Actual topology configs and jars, which are stored on the nimbus host's local disk.

To achieve failover from primary to backup servers, nimbus state/data needs to be replicated across all nimbus hosts or
stored in distributed storage. Replicating the data correctly involves state management and consistency checks, and
it is hard to test for correctness. However, many Storm users do not want to take an extra dependency on another replicated
storage system like HDFS and still need high availability. Eventually, we want to move to the BitTorrent protocol for code
distribution, given the size of the jars, and to achieve better scaling when the total number of supervisors is very high.
The current file system based model for code distribution works fine with systems that have a file-system-like structure,
but it fails to support a non-file-system-based approach like BitTorrent.
+To support BitTorrent and all the file-system-based replicated storage systems, we propose the following interface:
+
+```java
+/**
+ * Interface responsible for distributing code in the cluster.
+ */
+public interface ICodeDistributor {
+    /**
+     * Prepare this code distributor.
+     * @param conf the configuration map.
+     */
+    void prepare(Map conf);
+
+    /**
+     * This API will perform the actual upload of the code to the distributed implementation.
+     * The API should return a meta file which should have enough information for the downloader
+     * so it can download the code, e.g. for BitTorrent it will be a torrent file, and in the case of something
+     * like HDFS or S3 it might have the actual directory or paths for the files to be downloaded.
+     * @param dirPath local directory where all the code to be distributed exists.
+     * @param topologyId the topologyId for which the meta file needs to be created.
+     * @return metaFile
+     */
+    File upload(Path dirPath, String topologyId);
+
+    /**
+     * Given the topologyId and metafile, download the actual code and return the list of downloaded files.
+     * @param topologyId the topology to download the code for.
+     * @param metafile the meta file created by upload().
+     * @param destDirPath the folder where all the files will be downloaded.
+     * @return the downloaded files.
+     */
+    List<File> download(Path destDirPath, String topologyId, File metafile);
+
+    /**
+     * Given the topologyId, returns the number of hosts where the code has been replicated.
+     */
+    int getReplicationCount(String topologyId);
+
+    /**
+     * Performs the cleanup.
+     * @param topologyId the topology to clean up.
+     */
+    void cleanup(String topologyId);
+
+    /**
+     * Close this distributor.
+     * @param conf the configuration map.
+     */
+    void close(Map conf);
+}
+```
+To support replication we will allow the user to define a code replication factor, which reflects the number of nimbus
+hosts to which the code must be replicated before starting the topology. With replication comes the issue of consistency.
+We will treat zookeeper's list of active topologies as our authority for the topologies whose code must exist on a
+nimbus host. Any nimbus host that does not have all the code for all the topologies marked as active in zookeeper
+will relinquish its lock so some other nimbus host can become leader. A background thread on all nimbus hosts will
+continuously try to sync code from other hosts where the code was successfully replicated, so eventually at least one nimbus
+will accept leadership as long as at least one seed host exists for each active topology.
+
+The following steps describe code replication amongst nimbus hosts for a topology:
+* When the client uploads the jar, nothing changes.
+* When the client submits a topology, the leader nimbus calls the code distributor's upload function, which creates a metafile stored
+locally on the leader nimbus. The leader nimbus writes new entries under /storm/code-distributor/topologyId to notify all
+non-leader nimbuses that they should download this new code.
+* We wait on the leader nimbus to ensure at least N non-leader nimbuses have the code replicated, with a user-configurable
+timeout (a sketch of this wait loop follows the list below).
+* When a non-leader nimbus receives the notification about new code, it downloads the meta file from the leader nimbus and then
+downloads the real code by calling the code distributor's download function with the metafile as input.
+* Once a non-leader finishes downloading the code, it writes an entry under /storm/code-distributor/topologyId to indicate
+it is one of the possible places to download the code/metafile in case the leader nimbus dies.
+* The leader nimbus goes ahead and does all the usual things it does as part of topology submission.
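+The replication wait in the third step can be illustrated with a small polling loop. This is a hedged sketch, not
+Storm's actual implementation: it assumes only the `ICodeDistributor` interface above and the
+`topology.min.replication.count` / `topology.max.replication.wait.time.sec` semantics described in the configuration
+section later in this document.
+
+```java
+// Hedged sketch of the replication wait: the leader polls the code
+// distributor until the minimum replication count is reached or the
+// configured wait time elapses. A wait time of -1 means wait forever.
+boolean waitForReplication(ICodeDistributor distributor, String topologyId,
+                           int minReplicationCount, int maxWaitTimeSecs)
+        throws InterruptedException {
+    long deadline = System.currentTimeMillis() + maxWaitTimeSecs * 1000L;
+    while (maxWaitTimeSecs < 0 || System.currentTimeMillis() < deadline) {
+        if (distributor.getReplicationCount(topologyId) >= minReplicationCount) {
+            return true; // safe to mark the topology as active
+        }
+        Thread.sleep(1000);
+    }
+    return false; // timed out; activation proceeds anyway, per the config docs
+}
+```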
+
+The following sequence diagram describes the communication between the different components involved in code distribution.
+
+![Nimbus HA Topology Submission](images/nimbus_ha_topology_submission.png)
+
+## Thrift and REST API
+In order to avoid workers/supervisors/ui talking to zookeeper to get the master nimbus address, we are going to modify the
+`getClusterInfo` API so it can also return nimbus information. getClusterInfo currently returns a `ClusterSummary` instance,
+which has a list of `supervisorSummary` and a list of `topologySummary` instances. We will add a list of `NimbusSummary`
+to the `ClusterSummary`. See the structures below:
+
+```thrift
+struct ClusterSummary {
+  1: required list<SupervisorSummary> supervisors;
+  3: required list<TopologySummary> topologies;
+  4: required list<NimbusSummary> nimbuses;
+}
+
+struct NimbusSummary {
+  1: required string host;
+  2: required i32 port;
+  3: required i32 uptime_secs;
+  4: required bool isLeader;
+  5: required string version;
+}
+```
+
+This will be used by StormSubmitter, Nimbus clients, supervisors and the ui to discover the current leader and the participating
+nimbus hosts. Any nimbus host will be able to respond to these requests. The nimbus hosts can read this information once
+from zookeeper, cache it, and keep updating the cache when the watchers are fired to indicate any changes, which should
+be rare in the general case.
+
+## Configuration
+You can use nimbus HA with the default configuration; however, the default configuration assumes a single nimbus host, so it
+trades off replication for lower topology submission latency. Depending on your use case you can adjust the following configurations:
+* storm.codedistributor.class: A string representing the fully qualified class name of a class that implements
+org.apache.storm.codedistributor.ICodeDistributor. The default is "org.apache.storm.codedistributor.LocalFileSystemCodeDistributor".
+This class leverages the local file system to store both meta files and code/configs. It adds extra load on zookeeper, as even after
+downloading the code-distributor meta file it contacts zookeeper to figure out the hosts from which it can download the
+actual code/config and to get the current replication count. An alternative is
+"org.apache.storm.hdfs.ha.codedistributor.HDFSCodeDistributor", which relies on HDFS but does not add extra load on zookeeper and will
+make topology submission faster.
+* topology.min.replication.count: Minimum number of nimbus hosts where the code must be replicated before the leader nimbus
+can mark the topology as active and create assignments. The default is 1.
+* topology.max.replication.wait.time.sec: Maximum wait time for the replication to achieve topology.min.replication.count.
+Once this time has elapsed, nimbus will go ahead and perform the topology activation tasks even if the required topology.min.replication.count has not been achieved.
+The default is 60 seconds; a value of -1 indicates to wait forever.
+* nimbus.code.sync.freq.secs: Frequency at which the background thread on nimbus that syncs code for locally missing topologies runs. The default is 5 minutes.
+
+Note: Even though all nimbus hosts have watchers on zookeeper to be notified immediately as soon as a new topology is available for code
+download, the callback pretty much never results in a code download. In practice we have observed that the desired replication is only achieved once the background thread runs.
+So you should expect your topology submission time to be somewhere between 0 and (2 * nimbus.code.sync.freq.secs) for any
+topology.min.replication.count > 1.
diff --git a/docs/storm-hdfs.md b/docs/storm-hdfs.md
new file mode 100644
index 00000000000..219a137af52
--- /dev/null
+++ b/docs/storm-hdfs.md
@@ -0,0 +1,582 @@
+---
+title: Storm HDFS Integration
+layout: documentation
+documentation: true
+---
+
+Storm components for interacting with HDFS file systems.
+
+
+# HDFS Bolt
+## Usage
+The following example will write pipe ("|")-delimited files to the HDFS path hdfs://localhost:54310/foo. After every
+1,000 tuples it will sync the filesystem, making that data visible to other HDFS clients. It will rotate files when they
+reach 5 megabytes in size.
+
+```java
+// use "|" instead of "," for field delimiter
+RecordFormat format = new DelimitedRecordFormat()
+        .withFieldDelimiter("|");
+
+// sync the filesystem after every 1k tuples
+SyncPolicy syncPolicy = new CountSyncPolicy(1000);
+
+// rotate files when they reach 5MB
+FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
+
+FileNameFormat fileNameFormat = new DefaultFileNameFormat()
+        .withPath("/foo/");
+
+HdfsBolt bolt = new HdfsBolt()
+        .withFsUrl("hdfs://localhost:54310")
+        .withFileNameFormat(fileNameFormat)
+        .withRecordFormat(format)
+        .withRotationPolicy(rotationPolicy)
+        .withSyncPolicy(syncPolicy);
+```
+
+
+### Packaging a Topology
+When packaging your topology, it's important that you use the [maven-shade-plugin]() as opposed to the
+[maven-assembly-plugin]().
+
+The shade plugin provides facilities for merging JAR manifest entries, which the hadoop client leverages for URL scheme
+resolution.
+
+If you experience errors such as the following:
+
+```
+java.lang.RuntimeException: Error preparing HdfsBolt: No FileSystem for scheme: hdfs
+```
+
+it's an indication that your topology jar file isn't packaged properly.
+
+If you are using maven to create your topology jar, you should use the following `maven-shade-plugin` configuration to
+create your topology jar:
+
+```xml
+<plugin>
+    <groupId>org.apache.maven.plugins</groupId>
+    <artifactId>maven-shade-plugin</artifactId>
+    <version>1.4</version>
+    <configuration>
+        <createDependencyReducedPom>true</createDependencyReducedPom>
+    </configuration>
+    <executions>
+        <execution>
+            <phase>package</phase>
+            <goals>
+                <goal>shade</goal>
+            </goals>
+            <configuration>
+                <transformers>
+                    <transformer
+                            implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
+                    <transformer
+                            implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                        <mainClass></mainClass>
+                    </transformer>
+                </transformers>
+            </configuration>
+        </execution>
+    </executions>
+</plugin>
+```
+
+### Specifying a Hadoop Version
+By default, storm-hdfs uses the following Hadoop dependencies:
+
+```xml
+<dependency>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-client</artifactId>
+    <version>2.6.1</version>
+    <exclusions>
+        <exclusion>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+    </exclusions>
+</dependency>
+<dependency>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-hdfs</artifactId>
+    <version>2.6.1</version>
+    <exclusions>
+        <exclusion>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+    </exclusions>
+</dependency>
+```
+
+If you are using a different version of Hadoop, you should exclude the Hadoop libraries from the storm-hdfs dependency
+and add the dependencies for your preferred version in your pom.
+
+Hadoop client version incompatibilities can manifest as errors like:
+
+```
+com.google.protobuf.InvalidProtocolBufferException: Protocol message contained an invalid tag (zero)
+```
+
+## HDFS Bolt Customization
+
+### Record Formats
+The record format can be controlled by providing an implementation of the `org.apache.storm.hdfs.format.RecordFormat`
+interface:
+
+```java
+public interface RecordFormat extends Serializable {
+    byte[] format(Tuple tuple);
+}
+```
+
+The provided `org.apache.storm.hdfs.format.DelimitedRecordFormat` is capable of producing formats such as CSV and
+tab-delimited files.
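+If the built-in formats do not fit, a custom `RecordFormat` is straightforward to write. The following is a hedged
+sketch (the class name and `key=value` layout are illustrative, not part of storm-hdfs):
+
+```java
+import java.nio.charset.StandardCharsets;
+
+import org.apache.storm.hdfs.format.RecordFormat;
+import org.apache.storm.tuple.Tuple;
+
+// Illustrative sketch: renders every field of the tuple as key=value
+// pairs separated by "|", one record per line.
+public class KeyValueRecordFormat implements RecordFormat {
+    @Override
+    public byte[] format(Tuple tuple) {
+        StringBuilder sb = new StringBuilder();
+        for (String field : tuple.getFields()) {
+            if (sb.length() > 0) {
+                sb.append('|');
+            }
+            sb.append(field).append('=').append(tuple.getValueByField(field));
+        }
+        sb.append('\n');
+        return sb.toString().getBytes(StandardCharsets.UTF_8);
+    }
+}
+```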
+
+
+### File Naming
+File naming can be controlled by providing an implementation of the `org.apache.storm.hdfs.format.FileNameFormat`
+interface:
+
+```java
+public interface FileNameFormat extends Serializable {
+    void prepare(Map conf, TopologyContext topologyContext);
+    String getName(long rotation, long timeStamp);
+    String getPath();
+}
+```
+
+The provided `org.apache.storm.hdfs.format.DefaultFileNameFormat` will create file names with the following format:
+
+    {prefix}{componentId}-{taskId}-{rotationNum}-{timestamp}{extension}
+
+For example:
+
+    MyBolt-5-7-1390579837830.txt
+
+By default, the prefix is empty and the extension is ".txt".
+
+**New FileNameFormat:**
+
+The newer `org.apache.storm.hdfs.format.SimpleFileNameFormat` and `org.apache.storm.hdfs.trident.format.SimpleFileNameFormat` are more flexible; their `withName` method supports the following parameters:
+
+* $TIME - current time; use `withTimeFormat` to format it
+* $NUM - rotation number
+* $HOST - local host name
+* $PARTITION - partition index (`org.apache.storm.hdfs.trident.format.SimpleFileNameFormat` only)
+* $COMPONENT - component id (`org.apache.storm.hdfs.format.SimpleFileNameFormat` only)
+* $TASK - task id (`org.apache.storm.hdfs.format.SimpleFileNameFormat` only)
+
+e.g. `seq.$TIME.$HOST.$COMPONENT.$NUM.dat`
+
+The default file `name` is `$TIME.$NUM.txt`, and the default `timeFormat` is `yyyyMMddHHmmss`.
+
+
+### Sync Policies
+Sync policies allow you to control when buffered data is flushed to the underlying filesystem (thus making it available
+to clients reading the data) by implementing the `org.apache.storm.hdfs.sync.SyncPolicy` interface:
+
+```java
+public interface SyncPolicy extends Serializable {
+    boolean mark(Tuple tuple, long offset);
+    void reset();
+}
+```
+The `HdfsBolt` will call the `mark()` method for every tuple it processes. Returning `true` will trigger the `HdfsBolt`
+to perform a sync/flush, after which it will call the `reset()` method.
+
+The `org.apache.storm.hdfs.sync.CountSyncPolicy` class simply triggers a sync after the specified number of tuples have
+been processed.
+
+### File Rotation Policies
+Similar to sync policies, file rotation policies allow you to control when data files are rotated by providing an implementation
+of the `org.apache.storm.hdfs.rotation.FileRotationPolicy` interface:
+
+```java
+public interface FileRotationPolicy extends Serializable {
+    boolean mark(Tuple tuple, long offset);
+    void reset();
+    FileRotationPolicy copy();
+}
+```
+
+The `org.apache.storm.hdfs.rotation.FileSizeRotationPolicy` implementation allows you to trigger file rotation when
+data files reach a specific file size:
+
+```java
+FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
+```
+
+### File Rotation Actions
+Both the HDFS bolt and the Trident State implementation allow you to register any number of `RotationAction`s.
+`RotationAction`s provide a hook that lets you perform some action right after a file is rotated, for
+example moving a file to a different location or renaming it.
+
+
+```java
+public interface RotationAction extends Serializable {
+    void execute(FileSystem fileSystem, Path filePath) throws IOException;
+}
+```
+
+Storm-HDFS includes a simple action that will move a file after rotation:
+
+```java
+public class MoveFileAction implements RotationAction {
+    private static final Logger LOG = LoggerFactory.getLogger(MoveFileAction.class);
+
+    private String destination;
+
+    public MoveFileAction withDestination(String destDir) {
+        destination = destDir;
+        return this;
+    }
+
+    @Override
+    public void execute(FileSystem fileSystem, Path filePath) throws IOException {
+        Path destPath = new Path(destination, filePath.getName());
+        LOG.info("Moving file {} to {}", filePath, destPath);
+        boolean success = fileSystem.rename(filePath, destPath);
+        if (!success) {
+            LOG.warn("Failed to move file {} to {}", filePath, destPath);
+        }
+    }
+}
+```
+
+If you are using Trident and sequence files you can do something like this:
+
+```java
+HdfsState.Options seqOpts = new HdfsState.SequenceFileOptions()
+        .withFileNameFormat(fileNameFormat)
+        .withSequenceFormat(new DefaultSequenceFormat("key", "data"))
+        .withRotationPolicy(rotationPolicy)
+        .withFsUrl("hdfs://localhost:54310")
+        .addRotationAction(new MoveFileAction().withDestination("/dest2/"));
+```
+
+### Data Partitioning
+Data can be partitioned into different HDFS directories based on characteristics of the tuple being processed or purely
+external factors, such as system time. To partition your data, write a class that implements the `Partitioner`
+interface and pass it to the withPartitioner() method of your bolt. The getPartitionPath() method returns a partition
+path for a given tuple.
+
+Here's an example of a Partitioner that operates on a specific field of data:
+
+```java
+Partitioner partitioner = new Partitioner() {
+    @Override
+    public String getPartitionPath(Tuple tuple) {
+        return Path.SEPARATOR + tuple.getStringByField("city");
+    }
+};
+```
+
+## HDFS Bolt Support for HDFS Sequence Files
+
+The `org.apache.storm.hdfs.bolt.SequenceFileBolt` class allows you to write storm data to HDFS sequence files:
+
+```java
+// sync the filesystem after every 1k tuples
+SyncPolicy syncPolicy = new CountSyncPolicy(1000);
+
+// rotate files when they reach 5MB
+FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
+
+FileNameFormat fileNameFormat = new DefaultFileNameFormat()
+        .withExtension(".seq")
+        .withPath("/data/");
+
+// create sequence format instance.
+DefaultSequenceFormat format = new DefaultSequenceFormat("timestamp", "sentence");
+
+SequenceFileBolt bolt = new SequenceFileBolt()
+        .withFsUrl("hdfs://localhost:54310")
+        .withFileNameFormat(fileNameFormat)
+        .withSequenceFormat(format)
+        .withRotationPolicy(rotationPolicy)
+        .withSyncPolicy(syncPolicy)
+        .withCompressionType(SequenceFile.CompressionType.RECORD)
+        .withCompressionCodec("deflate");
+```
+
+The `SequenceFileBolt` requires that you provide an `org.apache.storm.hdfs.bolt.format.SequenceFormat` that maps tuples to
+key/value pairs:
+
+```java
+public interface SequenceFormat extends Serializable {
+    Class keyClass();
+    Class valueClass();
+
+    Writable key(Tuple tuple);
+    Writable value(Tuple tuple);
+}
+```
+
+## HDFS Bolt Support for Avro Files
+
+The `org.apache.storm.hdfs.bolt.AvroGenericRecordBolt` class allows you to write Avro objects directly to HDFS:
+
+```java
+// sync the filesystem after every 1k tuples
+SyncPolicy syncPolicy = new CountSyncPolicy(1000);
+
+// rotate files when they reach 5MB
+FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
+
+FileNameFormat fileNameFormat = new DefaultFileNameFormat()
+        .withExtension(".avro")
+        .withPath("/data/");
+
+AvroGenericRecordBolt bolt = new AvroGenericRecordBolt()
+        .withFsUrl("hdfs://localhost:54310")
+        .withFileNameFormat(fileNameFormat)
+        .withRotationPolicy(rotationPolicy)
+        .withSyncPolicy(syncPolicy);
+```
+
+The avro bolt will write records to separate files based on the schema of the record being processed. In other words,
+if the bolt receives records with two different schemas, it will write to two separate files. Each file will be rotated
+in accordance with the specified rotation policy. If a large number of Avro schemas are expected, then the bolt should
+be configured with a maximum number of open files at least equal to the number of schemas expected, to prevent excessive
+file open/close/create operations.
+
+To use this bolt you **must** register the appropriate Kryo serializers with your topology configuration. A convenience
+method is provided for this:
+
+`AvroUtils.addAvroKryoSerializations(conf);`
+
+By default Storm will use the `GenericAvroSerializer` to handle serialization. This will work, but there are much
+faster options available if you can pre-define the schemas you will be using or utilize an external schema registry. An
+implementation using the Confluent Schema Registry is provided, but others can be implemented and provided to Storm.
+Please see the javadoc for classes in org.apache.storm.hdfs.avro for information about using the built-in options or
+creating your own.
+
+
+## HDFS Bolt support for Trident API
+storm-hdfs also includes a Trident `state` implementation for writing data to HDFS, with an API that closely mirrors
+that of the bolts.
+
+```java
+Fields hdfsFields = new Fields("field1", "field2");
+
+FileNameFormat fileNameFormat = new DefaultFileNameFormat()
+        .withPath("/trident")
+        .withPrefix("trident")
+        .withExtension(".txt");
+
+RecordFormat recordFormat = new DelimitedRecordFormat()
+        .withFields(hdfsFields);
+
+FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);
+
+HdfsState.Options options = new HdfsState.HdfsFileOptions()
+        .withFileNameFormat(fileNameFormat)
+        .withRecordFormat(recordFormat)
+        .withRotationPolicy(rotationPolicy)
+        .withFsUrl("hdfs://localhost:54310");
+
+StateFactory factory = new HdfsStateFactory().withOptions(options);
+
+TridentState state = stream
+        .partitionPersist(factory, hdfsFields, new HdfsUpdater(), new Fields());
+```
+
+To use the sequence file `State` implementation, use the `HdfsState.SequenceFileOptions`:
+
+```java
+HdfsState.Options seqOpts = new HdfsState.SequenceFileOptions()
+        .withFileNameFormat(fileNameFormat)
+        .withSequenceFormat(new DefaultSequenceFormat("key", "data"))
+        .withRotationPolicy(rotationPolicy)
+        .withFsUrl("hdfs://localhost:54310")
+        .addRotationAction(new MoveFileAction().withDestination("/dest2/"));
+```
+
+### Note
+Whenever a batch is replayed by storm (due to failures), the trident state implementation automatically removes
+duplicates from the current data file by copying the data up to the last transaction to another file. Since this
+operation involves a lot of data copying, ensure that the data files are rotated at reasonable sizes with `FileSizeRotationPolicy`
+and at reasonable intervals with `TimedRotationPolicy` so that the recovery can complete within topology.message.timeout.secs.
+
+Also note that with `TimedRotationPolicy` the files are never rotated in the middle of a batch even if the timer ticks,
+but only when a batch completes, so that complete batches can be efficiently recovered in case of failures.
+
+## Working with Secure HDFS
+If your topology is going to interact with secure HDFS, your bolts/states need to be authenticated by the NameNode. We
+currently have 2 options to support this:
+
+### Using HDFS delegation tokens
+Your administrator can configure nimbus to automatically get delegation tokens on behalf of the topology submitter user. Nimbus should be started with the following configuration:
+
+```
+nimbus.autocredential.plugins.classes : ["org.apache.storm.hdfs.security.AutoHDFS"]
+nimbus.credential.renewers.classes : ["org.apache.storm.hdfs.security.AutoHDFS"]
+hdfs.keytab.file: "/path/to/keytab/on/nimbus" (This is the keytab of the hdfs super user that can impersonate other users.)
+hdfs.kerberos.principal: "superuser@EXAMPLE.com"
+nimbus.credential.renewers.freq.secs : 82800 (23 hours; hdfs tokens need to be renewed every 24 hours, so this value should be less than 24 hours.)
+topology.hdfs.uri:"hdfs://host:port" (This is an optional config; by default we will use the value of the "fs.defaultFS" property specified in hadoop's core-site.xml.)
+```
+
+Your topology configuration should have:
+
+```
+topology.auto-credentials :["org.apache.storm.hdfs.security.AutoHDFS"]
+```
+
+If nimbus did not have the above configuration, you need to add it and then restart nimbus. Ensure the hadoop configuration
+files (core-site.xml and hdfs-site.xml) and the storm-hdfs jar with all its dependencies are present in nimbus's classpath.
+
+As an alternative to adding the configuration files (core-site.xml and hdfs-site.xml) to the classpath, you can specify the configurations
+as part of the topology configuration, e.g. in your custom storm.yaml (or via the -c option while submitting the topology):
+
+```
+hdfsCredentialsConfigKeys : ["cluster1", "cluster2"] (the hdfs clusters you want to fetch the tokens from)
+"cluster1": {"config1": "value1", "config2": "value2", ... } (a map of config key-values specific to cluster1)
+"cluster2": {"config1": "value1", "hdfs.keytab.file": "/path/to/keytab/for/cluster2/on/nimbus", "hdfs.kerberos.principal": "cluster2user@EXAMPLE.com"} (here, along with other configs, we have a custom keytab and principal for "cluster2" which will override the keytab/principal specified at the topology level)
+```
+
+Instead of specifying key values you may also directly specify the resource files, e.g.:
+
+```
+"cluster1": {"resources": ["/path/to/core-site1.xml", "/path/to/hdfs-site1.xml"]}
+"cluster2": {"resources": ["/path/to/core-site2.xml", "/path/to/hdfs-site2.xml"]}
+```
+
+Storm will download the tokens separately for each of the clusters, populate them into the subject, and also renew the tokens periodically. This way it is possible to run multiple bolts connecting to separate HDFS clusters within the same topology.
+
+Nimbus will use the keytab and principal specified in the config to authenticate with the Namenode. From then on, for every
+topology submission, nimbus will impersonate the topology submitter user and acquire delegation tokens on behalf of the
+topology submitter user. If the topology was started with topology.auto-credentials set to AutoHDFS, nimbus will push the
+delegation tokens to all the workers for your topology, and the hdfs bolt/state will authenticate with the namenode using
+these tokens.
+
+As nimbus is impersonating the topology submitter user, you need to ensure that the user specified in hdfs.kerberos.principal
+has permission to acquire tokens on behalf of other users. To achieve this, follow the configuration directions listed here:
+http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/Superusers.html
+
+You can read about setting up secure HDFS here: http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/SecureMode.html.
+
+### Using keytabs on all worker hosts
+If you have distributed the keytab files for the hdfs user on all potential worker hosts, then you can use this method. You should specify an
+hdfs config key using the method HdfsBolt/State.withConfigKey("somekey"), and the value map of this key should have the following 2 properties:
+
+hdfs.keytab.file: "/path/to/keytab/"
+hdfs.kerberos.principal: "user@EXAMPLE.com"
+
+On worker hosts the bolt/trident-state code will use the keytab file with the principal provided in the config to authenticate with
+the Namenode. This method is a little dangerous, as you need to ensure all workers have the keytab file at the same location, and you need
+to remember this as you bring up new hosts in the cluster.
+
+---
+
+# HDFS Spout
+
+The HDFS spout is intended to allow feeding data into Storm from an HDFS directory.
+It will actively monitor the directory to consume any new files that appear in the directory.
+The HDFS spout does not currently support Trident.
+
+**Important**: The HDFS spout assumes that the files being made visible to it in the monitored directory
+are NOT actively being written to. Only after a file is completely written should it be made
+visible to the spout.
+This can be achieved either by writing the files out to another directory
+and, once completely written, moving them to the monitored directory, or by creating the files
+with a '.ignore' suffix in the monitored directory and, after the data is completely
+written, renaming them without the suffix. File names with a '.ignore' suffix are ignored
+by the spout.
+
+When the spout is actively consuming a file, it renames the file with a '.inprogress' suffix.
+After consuming all the contents in the file, the file will be moved to a configurable *done*
+directory and the '.inprogress' suffix will be dropped.
+
+**Concurrency:** If multiple spout instances are used in the topology, each instance will consume
+a different file. Synchronization among spout instances is done using lock files created in a
+(by default) '.lock' subdirectory under the monitored directory. A file with the same name
+as the file being consumed (without the in-progress suffix) is created in the lock directory.
+Once the file is completely consumed, the corresponding lock file is deleted.
+
+**Recovery from failure:**
+Periodically, the spout also records progress information in the lock file with respect to how much of the file has been
+consumed. In case of a crash of the spout instance (or a force kill of the topology),
+another spout can take over the file and resume from the location recorded in the lock file.
+
+Certain error conditions (such as the spout crashing) can leave behind lock files without deleting them.
+Such a stale lock file also indicates that the corresponding input file has not been completely
+processed. When detected, ownership of such stale lock files will be transferred to another spout.
+The configuration 'hdfsspout.lock.timeout.sec' is used to specify the duration of inactivity after
+which lock files should be considered stale. For lock file ownership transfer to succeed, the HDFS
+lease on the file (from the previous lock owner) should have expired. Spouts scan for stale lock files
+before selecting the next file for consumption.
+
+**Lock on the *.lock* directory:**
+HDFS spout instances create a *DIRLOCK* file in the .lock directory to coordinate certain accesses to
+the .lock dir itself. A spout will try to create it when it needs access to the .lock directory and
+then delete it when done. In error conditions such as a topology crash, force kill, or untimely death
+of a spout, this file may not get deleted. Future running instances of the spout will eventually recover
+the lock once the DIRLOCK file becomes stale due to inactivity for hdfsspout.lock.timeout.sec seconds.
+
+## Usage
+
+The following example creates an HDFS spout that reads text files from the HDFS path hdfs://localhost:54310/data/in.
+
+```java
+// Instantiate spout to read text files
+HdfsSpout textReaderSpout = new HdfsSpout().setReaderType("text")
+        .withOutputFields(TextFileReader.defaultFields)
+        .setHdfsUri("hdfs://localhost:54310")  // required
+        .setSourceDir("/data/in")              // required
+        .setArchiveDir("/data/done")           // required
+        .setBadFilesDir("/data/badfiles");     // required
+
+// If using Kerberos
+HashMap hdfsSettings = new HashMap();
+hdfsSettings.put("hdfs.keytab.file", "/path/to/keytab");
+hdfsSettings.put("hdfs.kerberos.principal", "user@EXAMPLE.com");
+
+textReaderSpout.setHdfsClientSettings(hdfsSettings);
+
+// Create topology
+TopologyBuilder builder = new TopologyBuilder();
+builder.setSpout("hdfsspout", textReaderSpout, SPOUT_NUM);
+
+// Setup bolts and wire up topology
+//   ..snip..
+
+// Submit topology with config
+Config conf = new Config();
+StormSubmitter.submitTopologyWithProgressBar("topologyName", conf, builder.createTopology());
+```
+
+A sample topology, HdfsSpoutTopology, is provided in the storm-starter module.
+
+## Configuration Settings
+Below is a list of HdfsSpout member functions used for configuration. The equivalent config is also possible via the Config object passed in when submitting the topology;
+however, the latter mechanism is deprecated, as it does not allow multiple HDFS spouts with differing settings.
+
+Only the methods shown in **bold** are required.
+
+| Method | Alternative config name (deprecated) | Default | Description |
+|----------------------------|--------------------------------------|-------------|-------------|
+| **.setReaderType()** |~~hdfsspout.reader.type~~ | | Determines which file reader to use. Set to 'seq' for reading sequence files or 'text' for text files. Set to a fully qualified class name if using a custom file reader class (that implements the interface org.apache.storm.hdfs.spout.FileReader). |
+| **.withOutputFields()** | | | Sets the names of the output fields for the spout. The number of fields depends upon the reader being used. For convenience, built-in reader types expose a static member called `defaultFields` that can be used for setting this. |
+| **.setHdfsUri()** |~~hdfsspout.hdfs~~ | | HDFS URI for the hdfs Name node. Example: hdfs://namenodehost:8020 |
+| **.setSourceDir()** |~~hdfsspout.source.dir~~ | | HDFS directory from which to read files. E.g. /data/inputdir |
+| **.setArchiveDir()** |~~hdfsspout.archive.dir~~ | | After a file is processed completely it will be moved to this HDFS directory. If this directory does not exist it will be created. E.g. /data/done |
+| **.setBadFilesDir()** |~~hdfsspout.badfiles.dir~~ | | If there is an error parsing a file's contents, the file is moved to this location. If this directory does not exist it will be created. E.g. /data/badfiles |
+| .setLockDir() |~~hdfsspout.lock.dir~~ | '.lock' subdirectory under hdfsspout.source.dir | Directory in which lock files will be created. Concurrent HDFS spout instances synchronize using *lock* files. Before processing a file the spout instance creates a lock file in this directory with the same name as the input file and deletes this lock file after processing the file. Spouts also periodically make a note of their progress (w.r.t. reading the input file) in the lock file so that another spout instance can resume progress on the same file if the spout dies for any reason. |
+| .setIgnoreSuffix() |~~hdfsspout.ignore.suffix~~ | .ignore | File names with this suffix in the hdfsspout.source.dir location will not be processed |
+| .setCommitFrequencyCount() |~~hdfsspout.commit.count~~ | 20000 | Record progress in the lock file after this many records are processed. If set to 0, this criterion will not be used. |
+| .setCommitFrequencySec() |~~hdfsspout.commit.sec~~ | 10 | Record progress in the lock file after this many seconds have elapsed. Must be greater than 0. |
+| .setMaxOutstanding() |~~hdfsspout.max.outstanding~~ | 10000 | Limits the number of unACKed tuples by pausing tuple generation (if ACKers are used in the topology) |
+| .setLockTimeoutSec() |~~hdfsspout.lock.timeout.sec~~ | 5 minutes | Duration of inactivity after which a lock file is considered to be abandoned and ready for another spout to take ownership |
+| .setClocksInSync() |~~hdfsspout.clocks.insync~~ | true | Indicates whether clocks on the storm machines are in sync (using services like NTP). Used for detecting stale locks. |
+| .withConfigKey() | | | Optional setting. Overrides the default key name ('hdfs.config', see below) used for specifying HDFS client configs. |
+| .setHdfsClientSettings() |~~hdfs.config~~ (unless changed via withConfigKey)| | Set it to a Map of key/value pairs indicating the HDFS settings to be used. For example, keytab and principal could be set using this. See the section **Using keytabs on all worker hosts** under the HDFS bolt documentation above. |
+| .withOutputStream() | | | Name of the output stream. If set, tuples will be emitted to the specified stream; otherwise tuples will be emitted to the default output stream. |
+
+---
diff --git a/docs/storm-jdbc.md b/docs/storm-jdbc.md
new file mode 100644
index 00000000000..0cb23531dac
--- /dev/null
+++ b/docs/storm-jdbc.md
@@ -0,0 +1,288 @@
+---
+title: Storm JDBC Integration
+layout: documentation
+documentation: true
+---
+
+Storm/Trident integration for JDBC. This package includes the core bolts and trident states that allow a storm topology
+to either insert storm tuples into a database table or to execute select queries against a database and enrich tuples
+in a storm topology.
+
+**Note**: Throughout the examples below, we make use of com.google.common.collect.Lists and com.google.common.collect.Maps.
+
+## Inserting into a database
+The bolt and trident state included in this package for inserting data into a database table are tied to a single table.
+
+### ConnectionProvider
+`org.apache.storm.jdbc.common.ConnectionProvider` is an interface that should be implemented by different connection pooling mechanisms:
+
+```java
+public interface ConnectionProvider extends Serializable {
+    /**
+     * method must be idempotent.
+     */
+    void prepare();
+
+    /**
+     * @return a DB connection over which the queries can be executed.
+     */
+    Connection getConnection();
+
+    /**
+     * called once when the system is shutting down, should be idempotent.
+     */
+    void cleanup();
+}
+```
+
+Out of the box we support `org.apache.storm.jdbc.common.HikariCPConnectionProvider`, which is an implementation that uses HikariCP.
+
+### JdbcMapper
+The main API for inserting data into a table using JDBC is the `org.apache.storm.jdbc.mapper.JdbcMapper` interface:
+
+```java
+public interface JdbcMapper extends Serializable {
+    List<Column> getColumns(ITuple tuple);
+}
+```
+
+The `getColumns()` method defines how a storm tuple maps to a list of columns representing a row in a database.
+**The order of the returned list is important. The placeholders in the supplied queries are resolved in the same order as the returned list.**
+For example, if the user-supplied insert query is `insert into user(user_id, user_name, create_date) values (?,?, now())`, the 1st item
+of the list returned by the `getColumns` method will map to the 1st placeholder, the 2nd to the 2nd, and so on. We do not parse
+the supplied queries to try to resolve placeholders by column names.
+Not making any assumptions about the query syntax allows this connector
+to be used by some non-standard SQL frameworks, like Phoenix, which only supports UPSERT INTO.
+
+### JdbcInsertBolt
+To use the `JdbcInsertBolt`, you construct an instance of it by specifying a `ConnectionProvider` implementation
+and a `JdbcMapper` implementation that converts a storm tuple to a DB row. In addition, you must either supply
+a table name using the `withTableName` method or an insert query using `withInsertQuery`.
+If you specify an insert query, you should ensure that your `JdbcMapper` implementation returns a list of columns in the same order as in your insert query.
+You can optionally specify a query timeout seconds param that specifies the maximum number of seconds an insert query can take.
+The default is set to the value of topology.message.timeout.secs, and a value of -1 indicates not to set any query timeout.
+You should set the query timeout value to be <= topology.message.timeout.secs.
+
+```java
+Map hikariConfigMap = Maps.newHashMap();
+hikariConfigMap.put("dataSourceClassName","com.mysql.jdbc.jdbc2.optional.MysqlDataSource");
+hikariConfigMap.put("dataSource.url", "jdbc:mysql://localhost/test");
+hikariConfigMap.put("dataSource.user","root");
+hikariConfigMap.put("dataSource.password","password");
+ConnectionProvider connectionProvider = new HikariCPConnectionProvider(hikariConfigMap);
+
+String tableName = "user_details";
+JdbcMapper simpleJdbcMapper = new SimpleJdbcMapper(tableName, connectionProvider);
+
+JdbcInsertBolt userPersistenceBolt = new JdbcInsertBolt(connectionProvider, simpleJdbcMapper)
+        .withTableName("user")
+        .withQueryTimeoutSecs(30);
+// Or
+JdbcInsertBolt userPersistenceBolt = new JdbcInsertBolt(connectionProvider, simpleJdbcMapper)
+        .withInsertQuery("insert into user values (?,?)")
+        .withQueryTimeoutSecs(30);
+```
+
+### SimpleJdbcMapper
+`storm-jdbc` includes a general purpose `JdbcMapper` implementation called `SimpleJdbcMapper` that can map a Storm
+tuple to a database row. `SimpleJdbcMapper` assumes that the storm tuple has fields with the same names as the columns of
+the database table that you intend to write to.
+
+To use `SimpleJdbcMapper`, you simply tell it the tableName that you want to write to and provide a connectionProvider instance.
+
+The following code creates a `SimpleJdbcMapper` instance that:
+
+1. Will allow the mapper to transform a storm tuple to a list of columns mapping to a row in the table test.user_details.
+2. Will use the provided HikariCP configuration to establish a connection pool with the specified database configuration and
+automatically figure out the column names and corresponding data types of the table that you intend to write to.
+Please see https://github.com/brettwooldridge/HikariCP#configuration-knobs-baby to learn more about hikari configuration properties.
+
+```java
+Map hikariConfigMap = Maps.newHashMap();
+hikariConfigMap.put("dataSourceClassName","com.mysql.jdbc.jdbc2.optional.MysqlDataSource");
+hikariConfigMap.put("dataSource.url", "jdbc:mysql://localhost/test");
+hikariConfigMap.put("dataSource.user","root");
+hikariConfigMap.put("dataSource.password","password");
+ConnectionProvider connectionProvider = new HikariCPConnectionProvider(hikariConfigMap);
+String tableName = "user_details";
+JdbcMapper simpleJdbcMapper = new SimpleJdbcMapper(tableName, connectionProvider);
+```
+The mapper initialized in the example above assumes a storm tuple has values for all the columns of the table you intend to insert data into, and its `getColumns`
+method will return the columns in the order in which the Jdbc connection instance's `connection.getMetaData().getColumns();` method returns them.
+
+**If you specified your own insert query to `JdbcInsertBolt`, you must initialize `SimpleJdbcMapper` with an explicit column schema such that the schema has columns in the same order as your insert query.**
+For example, if your insert query is `Insert into user (user_id, user_name) values (?,?)`, then your `SimpleJdbcMapper` should be initialized with the following statements:
+
+```java
+List<Column> columnSchema = Lists.newArrayList(
+    new Column("user_id", java.sql.Types.INTEGER),
+    new Column("user_name", java.sql.Types.VARCHAR));
+JdbcMapper simpleJdbcMapper = new SimpleJdbcMapper(columnSchema);
+```
+
+If your storm tuple only has fields for a subset of columns, i.e. if some of the columns in your table have default values and you want to only insert values for columns with no default values, you can enforce the behavior by initializing the
+`SimpleJdbcMapper` with an explicit column schema. For example, say you have a user_details table
+`create table if not exists user_details (user_id integer, user_name varchar(100), dept_name varchar(100), create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP);`.
+In this table the create_time column has a default value. To ensure only the columns with no default values are inserted,
+you can initialize the `jdbcMapper` as below:
+
+```java
+List<Column> columnSchema = Lists.newArrayList(
+    new Column("user_id", java.sql.Types.INTEGER),
+    new Column("user_name", java.sql.Types.VARCHAR),
+    new Column("dept_name", java.sql.Types.VARCHAR));
+JdbcMapper simpleJdbcMapper = new SimpleJdbcMapper(columnSchema);
+```
+### JdbcTridentState
+We also support a trident persistent state that can be used with trident topologies. To create a jdbc persistent trident
+state you need to initialize it with the table name or an insert query, the JdbcMapper instance, and the connection provider instance.
+See the example below:
+
+```java
+JdbcState.Options options = new JdbcState.Options()
+        .withConnectionProvider(connectionProvider)
+        .withMapper(jdbcMapper)
+        .withTableName("user_details")
+        .withQueryTimeoutSecs(30);
+JdbcStateFactory jdbcStateFactory = new JdbcStateFactory(options);
+```
+Similar to `JdbcInsertBolt`, you can specify a custom insert query using `withInsertQuery` instead of specifying a table name.
+
+## Lookup from Database
+We support `select` queries against databases to allow enrichment of storm tuples in a topology.
+The main API for
+executing select queries against a database using JDBC is the `org.apache.storm.jdbc.mapper.JdbcLookupMapper` interface:
+
+```java
+void declareOutputFields(OutputFieldsDeclarer declarer);
+List<Column> getColumns(ITuple tuple);
+List<Values> toTuple(ITuple input, List<Column> columns);
+```
+
+The `declareOutputFields` method is used to indicate what fields will be emitted as part of the output tuple of processing a storm
+tuple.
+
+The `getColumns` method specifies the placeholder columns in a select query, along with their SQL types and the values to use.
+For example, in the user_details table mentioned above, if you were executing a query `select user_name from user_details where
+user_id = ? and create_time > ?`, the `getColumns` method would take a storm input tuple and return a List containing two items.
+The first instance of `Column` type's `getValue()` method will be used as the value of `user_id` to look up, and the
+second instance of `Column` type's `getValue()` method will be used as the value of `create_time`.
+**Note: the order in the returned list determines the placeholders' values. In other words, the first item in the list maps
+to the first `?` in the select query, the second item to the second `?` in the query, and so on.**
+
+The `toTuple` method takes in the input tuple and a list of columns representing a DB row resulting from the select query,
+and returns a list of values to be emitted.
+**Please note that it returns a list of `Values` and not just a single instance of `Values`.**
+This allows a single DB row to be mapped to multiple output storm tuples.
+
+### SimpleJdbcLookupMapper
+`storm-jdbc` includes a general purpose `JdbcLookupMapper` implementation called `SimpleJdbcLookupMapper`.
+
+To use `SimpleJdbcLookupMapper`, you have to initialize it with the fields that will be output by your bolt and the list of
+columns that are used in your select query as placeholders. The following example shows initialization of a `SimpleJdbcLookupMapper`
+that declares `user_id,user_name,create_date` as output fields and `user_id` as the placeholder column in the select query.
+SimpleJdbcLookupMapper assumes the field name in your tuple is equal to the placeholder column name, i.e. in our example
+`SimpleJdbcLookupMapper` will look for a field `user_id` in the input tuple and use its value as the placeholder's value in the
+select query. For constructing output tuples, it looks for the fields specified in `outputFields` in the input tuple first,
+and if a field is not found in the input tuple, it looks at the select query's output row for a column with the same name as the field name.
+So in the example below, if the input tuple had fields `user_id, create_date` and the select query was
+`select user_name from user_details where user_id = ?`, for each input tuple `SimpleJdbcLookupMapper.getColumns(tuple)`
+will return the value of `tuple.getValueByField("user_id")`, which will be used as the value of the `?` in the select query.
+For each output row from the DB, `SimpleJdbcLookupMapper.toTuple()` will use the `user_id, create_date` from the input tuple as-is,
+adding only `user_name` from the resulting row, and return these 3 fields as a single output tuple.
+
+```java
+Fields outputFields = new Fields("user_id", "user_name", "create_date");
+List<Column> queryParamColumns = Lists.newArrayList(new Column("user_id", Types.INTEGER));
+this.jdbcLookupMapper = new SimpleJdbcLookupMapper(outputFields, queryParamColumns);
+```
+
+### JdbcLookupBolt
+To use the `JdbcLookupBolt`, construct an instance of it using a `ConnectionProvider` instance, a `JdbcLookupMapper` instance, and the select query to execute.
+You can optionally specify a query timeout seconds param that specifies the maximum number of seconds the select query can take.
+The default is set to the value of topology.message.timeout.secs. You should set this value to be <= topology.message.timeout.secs.
+
+```java
+String selectSql = "select user_name from user_details where user_id = ?";
+SimpleJdbcLookupMapper lookupMapper = new SimpleJdbcLookupMapper(outputFields, queryParamColumns);
+JdbcLookupBolt userNameLookupBolt = new JdbcLookupBolt(connectionProvider, selectSql, lookupMapper)
+        .withQueryTimeoutSecs(30);
+```
+
+### JdbcTridentState for lookup
+We also support a trident query state that can be used with trident topologies.
+
+```java
+JdbcState.Options options = new JdbcState.Options()
+        .withConnectionProvider(connectionProvider)
+        .withJdbcLookupMapper(new SimpleJdbcLookupMapper(new Fields("user_name"), Lists.newArrayList(new Column("user_id", Types.INTEGER))))
+        .withSelectQuery("select user_name from user_details where user_id = ?")
+        .withQueryTimeoutSecs(30);
+```
+
+## Example
+A runnable example can be found in the `src/test/java/topology` directory.
+
+### Setup
+* Ensure you have included the JDBC implementation dependency for your chosen database as part of your build configuration.
+* The test topologies execute the following queries, so your intended DB must support these queries for the test topologies
+to work.
+
+```SQL
+create table if not exists user (user_id integer, user_name varchar(100), dept_name varchar(100), create_date date);
+create table if not exists department (dept_id integer, dept_name varchar(100));
+create table if not exists user_department (user_id integer, dept_id integer);
+insert into department values (1, 'R&D');
+insert into department values (2, 'Finance');
+insert into department values (3, 'HR');
+insert into department values (4, 'Sales');
+insert into user_department values (1, 1);
+insert into user_department values (2, 2);
+insert into user_department values (3, 3);
+insert into user_department values (4, 4);
+select dept_name from department, user_department where department.dept_id = user_department.dept_id and user_department.user_id = ?;
+```
+### Execution
+Run the `org.apache.storm.jdbc.topology.UserPersistenceTopology` class using the storm jar command. The class expects 5 args:
+
+storm jar org.apache.storm.jdbc.topology.UserPersistenceTopology <dataSourceClassName> <dataSource.url> <user> <password> [topology name]
+
+To make it work with MySQL, you can add the following to the pom.xml:
+
+```
+<dependency>
+    <groupId>mysql</groupId>
+    <artifactId>mysql-connector-java</artifactId>
+    <version>5.1.31</version>
+</dependency>
+```
+
+You can generate a single jar with dependencies using the mvn assembly plugin.
+To use the plugin, add the following to your pom.xml and execute
+`mvn clean compile assembly:single`:
+
+```
+<plugin>
+    <artifactId>maven-assembly-plugin</artifactId>
+    <configuration>
+        <archive>
+            <manifest>
+                <mainClass>fully.qualified.MainClass</mainClass>
+            </manifest>
+        </archive>
+        <descriptorRefs>
+            <descriptorRef>jar-with-dependencies</descriptorRef>
+        </descriptorRefs>
+    </configuration>
+</plugin>
+```
+
+MySQL example:
+
+```
+storm jar ~/repo/incubator-storm/external/storm-jdbc/target/storm-jdbc-0.10.0-SNAPSHOT-jar-with-dependencies.jar org.apache.storm.jdbc.topology.UserPersistenceTopology com.mysql.jdbc.jdbc2.optional.MysqlDataSource jdbc:mysql://localhost/test root password UserPersistenceTopology
+```
+
+You can execute a select query against the user table, which should show the newly inserted rows:
+
+```
+select * from user;
+```
+
+For trident you can view `org.apache.storm.jdbc.topology.UserPersistenceTridentTopology`.
diff --git a/docs/storm-jms-example.md b/docs/storm-jms-example.md
new file mode 100644
index 00000000000..36caf5eb426
--- /dev/null
+++ b/docs/storm-jms-example.md
@@ -0,0 +1,111 @@
+---
+title: Storm JMS Integration
+layout: documentation
+documentation: true
+---
+## Example Storm JMS Topology
+
+The storm-jms source code contains an example project (in the "examples" directory)
+that builds a multi-bolt/multi-spout topology (depicted below) using the JMS Spout and JMS Bolt components.
+
+![picture alt](images/Storm-JMS-Example.png "Example JMS Topology")
+
+The green components represent instances of the storm-jms components. White components represent
+"standard" Storm bolts (in the example these bolts are instances of `GenericBolt`, which simply logs
+information about the tuples it receives and emits).
+
+Grey arrows represent JMS messages, while black arrows represent the flow of Storm tuple objects.
+
+### JMS Transactions and Guaranteed Processing
+The example is set up to be "transactional," meaning the JMS Spout will use Storm's guaranteed
+processing capabilities to determine if a JMS Message should be acknowledged. Each bolt in the
+topology will anchor to each tuple it receives (a sketch of such a bolt follows the data flow description below). If every bolt successfully processes and acks
+each tuple in the chain, the original JMS Message will be acknowledged, and the underlying
+JMS implementation will not attempt re-delivery of the message. If a bolt fails to process/ack
+a tuple, the JMS message will not be acknowledged, and the JMS implementation will queue the
+message for redelivery.
+
+### Data Flow
+The topology contains two chains: one originating from a JMS Spout connected to a Queue, and
+another originating from a JMS Spout connected to a Topic.
+
+**Chain #1**
+
+1. The "JMS Queue Spout" receives a JMS Message object from the queue and emits a
+tuple to the "Intermediate Bolt".
+2. The "Intermediate Bolt" emits a tuple to the "Final Bolt" and the "JMS Topic Bolt", and acks
+the tuple it received.
+3. The "Final Bolt" receives the tuple and simply acks it; it does not emit anything.
+4. The "JMS Topic Bolt" receives a tuple, constructs a JMS Message from the tuple's values,
+and publishes the message to a JMS Topic.
+5. If the "JMS Topic Bolt" successfully publishes the JMS message, it will ack the tuple.
+6. The "JMS Queue Spout" will receive notification if all bolts in the chain have acked,
+and acknowledge the original JMS Message. If one or more bolts in the chain fail to ack a tuple, the
+"JMS Queue Spout" will not acknowledge the JMS message.
+
+**Chain #2**
+
+1. The "JMS Topic Spout" receives a JMS message from the topic and emits a tuple to "Another Bolt".
+2. The "Another Bolt" receives and acks the tuple.
+3. The "JMS Topic Spout" acknowledges the JMS message.
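+As a hedged illustration of the anchoring behavior described above (this is not the actual `GenericBolt` source; the
+class name and output field are illustrative):
+
+```java
+import java.util.Map;
+
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+
+// Emits downstream tuples anchored to the input tuple and acks the
+// input, so a failure anywhere in the chain propagates back to the
+// JMS spout and the JMS message is redelivered.
+public class AnchoringBolt extends BaseRichBolt {
+    private OutputCollector collector;
+
+    @Override
+    public void prepare(Map<String, Object> conf, TopologyContext context, OutputCollector collector) {
+        this.collector = collector;
+    }
+
+    @Override
+    public void execute(Tuple input) {
+        collector.emit(input, input.getValues()); // anchor on the input tuple
+        collector.ack(input);                     // ack only after emitting
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("message"));
+    }
+}
+```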
+
+
+### Building the Example Topology
+
+    $ cd storm-jms
+    $ mvn clean install
+
+
+
+### Running the Example Topology Locally
+
+The example uses Apache ActiveMQ 5.4.0 for JMS. Download and install it from
+[Apache ActiveMQ Downloads](http://activemq.apache.org/download.html).
+
+There is no specific configuration required; simply start ActiveMQ:
+
+    $ [ACTIVEMQ_HOME]/bin/activemq
+
+Run the example topology from the `examples` directory:
+
+    $ mvn exec:java
+
+When the topology runs, it will connect to ActiveMQ and the following JMS Destinations will be created:
+
+    backtype.storm.contrib.example.queue
+    backtype.storm.contrib.example.topic
+
+To publish a message to the `backtype.storm.contrib.example.queue` queue:
+
+1. Open the ActiveMQ Queue admin console: http://localhost:8161/admin/queues.jsp
+2. Click the [Send To](http://localhost:8161/admin/send.jsp?JMSDestination=backtype.storm.contrib.example.queue&JMSDestinationType=queue)
+link for the `backtype.storm.contrib.example.queue` queue entry.
+3. On the "Send a JMS Message" form, select the "Persistent Delivery" checkbox, enter
+some text for the message body, and click "Send".
+
+
+In the terminal you should see the following among the output:
+
+    DEBUG (backtype.storm.contrib.jms.bolt.JmsBolt:183) - Connecting JMS..
+    DEBUG (backtype.storm.contrib.jms.spout.JmsSpout:213) - sending tuple: ActiveMQTextMessage {commandId = 5, responseRequired = true, messageId = ID:budreau.home-51286-1321074044423-2:4:1:1:1, originalDestination = null, originalTransactionId = null, producerId = ID:budreau.home-51286-1321074044423-2:4:1:1, destination = queue://backtype.storm.contrib.example.queue, transactionId = null, expiration = 0, timestamp = 1321735055910, arrival = 0, brokerInTime = 1321735055910, brokerOutTime = 1321735055921, correlationId = , replyTo = null, persistent = true, type = , priority = 0, groupID = null, groupSequence = 0, targetConsumerId = null, compressed = false, userID = null, content = null, marshalledProperties = org.apache.activemq.util.ByteSequence@6c27ca12, dataStructure = null, redeliveryCounter = 0, size = 0, properties = {secret=880412b7-de71-45dd-8a80-8132589ccd22}, readOnlyProperties = true, readOnlyBody = true, droppable = false, text = Hello storm-jms!}
+    DEBUG (backtype.storm.contrib.jms.spout.JmsSpout:219) - Requested deliveryMode: CLIENT_ACKNOWLEDGE
+    DEBUG (backtype.storm.contrib.jms.spout.JmsSpout:220) - Our deliveryMode: CLIENT_ACKNOWLEDGE
+    DEBUG (backtype.storm.contrib.jms.spout.JmsSpout:224) - Requesting acks.
+    DEBUG (backtype.storm.contrib.jms.example.GenericBolt:60) - [INTERMEDIATE_BOLT] Received message: source: 1:10, stream: 1, id: {-7100026097570233628=-7100026097570233628}, [Hello storm-jms!]
+    DEBUG (backtype.storm.contrib.jms.example.GenericBolt:66) - [INTERMEDIATE_BOLT] emitting: source: 1:10, stream: 1, id: {-7100026097570233628=-7100026097570233628}, [Hello storm-jms!]
+    DEBUG (backtype.storm.contrib.jms.example.GenericBolt:75) - [INTERMEDIATE_BOLT] ACKing tuple: source: 1:10, stream: 1, id: {-7100026097570233628=-7100026097570233628}, [Hello storm-jms!]
+    DEBUG (backtype.storm.contrib.jms.bolt.JmsBolt:136) - Tuple received. Sending JMS message.
+    DEBUG (backtype.storm.contrib.jms.example.GenericBolt:60) - [FINAL_BOLT] Received message: source: 2:2, stream: 1, id: {-7100026097570233628=-5393763013502927792}, [Hello storm-jms!]
+    DEBUG (backtype.storm.contrib.jms.example.GenericBolt:75) - [FINAL_BOLT] ACKing tuple: source: 2:2, stream: 1, id: {-7100026097570233628=-5393763013502927792}, [Hello storm-jms!]
+    DEBUG (backtype.storm.contrib.jms.bolt.JmsBolt:144) - ACKing tuple: source: 2:2, stream: 1, id: {-7100026097570233628=-9118586029611278300}, [Hello storm-jms!]
+    DEBUG (backtype.storm.contrib.jms.spout.JmsSpout:251) - JMS Message acked: ID:budreau.home-51286-1321074044423-2:4:1:1:1
+    DEBUG (backtype.storm.contrib.jms.spout.JmsSpout:213) - sending tuple: ActiveMQTextMessage {commandId = 5, responseRequired = true, messageId = ID:budreau.home-60117-1321735025796-0:0:1:1:1, originalDestination = null, originalTransactionId = null, producerId = ID:budreau.home-60117-1321735025796-0:0:1:1, destination = topic://backtype.storm.contrib.example.topic, transactionId = null, expiration = 0, timestamp = 1321735056258, arrival = 0, brokerInTime = 1321735056260, brokerOutTime = 1321735056260, correlationId = null, replyTo = null, persistent = true, type = null, priority = 4, groupID = null, groupSequence = 0, targetConsumerId = null, compressed = false, userID = null, content = null, marshalledProperties = null, dataStructure = null, redeliveryCounter = 0, size = 0, properties = null, readOnlyProperties = true, readOnlyBody = true, droppable = false, text = source: 2:2, stream: 1, id: {-710002609757023... storm-jms!]}
+    DEBUG (backtype.storm.contrib.jms.spout.JmsSpout:219) - Requested deliveryMode: CLIENT_ACKNOWLEDGE
+    DEBUG (backtype.storm.contrib.jms.spout.JmsSpout:220) - Our deliveryMode: CLIENT_ACKNOWLEDGE
+    DEBUG (backtype.storm.contrib.jms.spout.JmsSpout:224) - Requesting acks.
+    DEBUG (backtype.storm.contrib.jms.example.GenericBolt:60) - [ANOTHER_BOLT] Received message: source: 5:9, stream: 1, id: {-5117078009445186058=-5117078009445186058}, [source: 2:2, stream: 1, id: {-7100026097570233628=-9118586029611278300}, [Hello storm-jms!]]
+    DEBUG (backtype.storm.contrib.jms.example.GenericBolt:75) - [ANOTHER_BOLT] ACKing tuple: source: 5:9, stream: 1, id: {-5117078009445186058=-5117078009445186058}, [source: 2:2, stream: 1, id: {-7100026097570233628=-9118586029611278300}, [Hello storm-jms!]]
+    DEBUG (backtype.storm.contrib.jms.spout.JmsSpout:251) - JMS Message acked: ID:budreau.home-60117-1321735025796-0:0:1:1:1
+
+The topology will run for 2 minutes, then gracefully shut down.
+
diff --git a/docs/storm-jms-spring.md b/docs/storm-jms-spring.md
new file mode 100644
index 00000000000..df523aae887
--- /dev/null
+++ b/docs/storm-jms-spring.md
@@ -0,0 +1,25 @@
+---
+title: Storm JMS Integration
+layout: documentation
+documentation: true
+---
+### Connecting to JMS Using Spring's JMS Support
+
+Create a Spring applicationContext.xml file that defines one or more destination (topic/queue) beans, as well as a connection factory.
+
+```xml
+<beans
+  xmlns="http://www.springframework.org/schema/beans"
+  xmlns:amq="http://activemq.apache.org/schema/core"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-2.0.xsd
+  http://activemq.apache.org/schema/core http://activemq.apache.org/schema/core/activemq-core-5.2.0.xsd">
+
+  <amq:queue id="notificationQueue" physicalName="backtype.storm.contrib.example.queue" />
+
+  <amq:topic id="notificationTopic" physicalName="backtype.storm.contrib.example.topic" />
+
+  <amq:connectionFactory id="jmsConnectionFactory" brokerURL="tcp://localhost:61616" />
+
+</beans>
+```
\ No newline at end of file
diff --git a/docs/storm-jms.md b/docs/storm-jms.md
new file mode 100644
index 00000000000..66a247e3c88
--- /dev/null
+++ b/docs/storm-jms.md
@@ -0,0 +1,33 @@
+---
+title: Storm JMS Integration
+layout: documentation
+documentation: true
+---
+
+## About Storm JMS
+Storm JMS is a generic framework for integrating JMS messaging within the Storm framework.
+
+
+Storm-JMS allows you to inject data into Storm via a generic JMS spout, as well as consume data from Storm via a generic JMS bolt.
+
+Both the JMS Spout and JMS Bolt are data agnostic. To use them, you provide a simple Java class that bridges the JMS and Storm APIs and encapsulates any domain-specific logic.
+
+## Components
+
+### JMS Spout
+The JMS Spout component allows for data published to a JMS topic or queue to be consumed by a Storm topology.
+
+A JMS Spout connects to a JMS Destination (topic or queue), and emits Storm "Tuple" objects based on the content of the JMS message received.
+
+
+### JMS Bolt
+The JMS Bolt component allows for data within a Storm topology to be published to a JMS destination (topic or queue).
+
+A JMS Bolt connects to a JMS Destination, and publishes JMS Messages based on the Storm "Tuple" objects it receives.
+
+
+[Example Topology](storm-jms-example.html)
+
+
+[Using Spring JMS](storm-jms-spring.html)
+
diff --git a/docs/storm-kafka-client.md b/docs/storm-kafka-client.md
new file mode 100644
index 00000000000..73c88c72d6e
--- /dev/null
+++ b/docs/storm-kafka-client.md
@@ -0,0 +1,353 @@
+---
+title: Storm Kafka Integration (0.10.x+)
+layout: documentation
+documentation: true
+---
+# Storm Apache Kafka integration using the kafka-client jar
+This includes the new Apache Kafka consumer API.
+
+## Compatibility
+
+Apache Kafka versions 0.10.1.0 onwards. Please be aware that [KAFKA-7044](https://issues.apache.org/jira/browse/KAFKA-7044) can cause crashes in the spout, so you should upgrade Kafka if you are using an affected version (1.1.0, 1.1.1 or 2.0.0).
+
+## Writing to Kafka as part of your topology
+You can create an instance of org.apache.storm.kafka.bolt.KafkaBolt and attach it as a component to your topology. If you
+are using Trident, you can use org.apache.storm.kafka.trident.TridentState, org.apache.storm.kafka.trident.TridentStateFactory and
+org.apache.storm.kafka.trident.TridentKafkaUpdater.
+
+You need to provide implementations for the following two interfaces.
+
+### TupleToKafkaMapper and TridentTupleToKafkaMapper
+These interfaces have two methods defined:
+
+```java
+K getKeyFromTuple(Tuple/TridentTuple tuple);
+V getMessageFromTuple(Tuple/TridentTuple tuple);
+```
+
+As the names suggest, these methods are called to map a tuple to a Kafka key and a Kafka message. If you just want one field
+as the key and one field as the value, you can use the provided FieldNameBasedTupleToKafkaMapper.java
+implementation. For backward compatibility reasons, if you use the default constructor to construct
+FieldNameBasedTupleToKafkaMapper, the KafkaBolt implementation always looks for fields named "key" and "message".
+Alternatively, you can specify different key and message fields by using the non-default constructor.
+In the TridentKafkaState you must specify the field names for key and message, as there is no default constructor.
+These should be specified while constructing an instance of FieldNameBasedTupleToKafkaMapper.
+
+### KafkaTopicSelector and trident KafkaTopicSelector
+This interface has only one method:
+
+```java
+public interface KafkaTopicSelector {
+    String getTopics(Tuple/TridentTuple tuple);
+}
+```
+
+The implementation of this interface should return the topic to which the tuple's key/message mapping needs to be published.
+If you return null, the message will be ignored. If you have one static topic name, you can use
+DefaultTopicSelector.java and set the name of the topic in the constructor.
+`FieldNameTopicSelector` and `FieldIndexTopicSelector` can be used to select the topic to publish a tuple to.
+A user just needs to specify the field name or field index of the topic name in the tuple itself.
+When the topic name is not found, the `Field*TopicSelector` will write messages into the default topic.
+Please make sure the default topic has been created.
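+
+For illustration only, a custom selector written against the interface as sketched above might route each tuple by one of its fields. The field name "topic" and the fallback topic used here are assumptions for the sketch, not part of the API:
+
+```java
+public class TupleFieldTopicSelector implements KafkaTopicSelector {
+    // Illustrative fallback; assumed to already exist on the broker.
+    private static final String FALLBACK_TOPIC = "default-topic";
+
+    @Override
+    public String getTopics(Tuple tuple) {
+        // Route by the tuple's "topic" field. Per the contract above, returning
+        // null would cause the message to be ignored, so fall back instead.
+        String topic = tuple.getStringByField("topic");
+        return topic != null ? topic : FALLBACK_TOPIC;
+    }
+}
+```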
+
+### Specifying Kafka producer properties
+You can provide all the producer properties in your Storm topology by calling `KafkaBolt.withProducerProperties()` and `TridentKafkaStateFactory.withProducerProperties()`. Please see the section "Important configuration properties for the producer" at
+http://kafka.apache.org/documentation.html#newproducerconfigs for more details.
+These are also defined in `org.apache.kafka.clients.producer.ProducerConfig`.
+
+### Using wildcard Kafka topic match
+You can do a wildcard topic match by adding the following config:
+
+```java
+Config config = new Config();
+config.put("kafka.topic.wildcard.match", true);
+```
+
+After this you can specify a wildcard topic for matching, e.g. `clickstream.*.log`. This will match all topics such as `clickstream.my.log` and `clickstream.cart.log`.
+
+
+### Putting it all together
+
+For the bolt:
+
+```java
+TopologyBuilder builder = new TopologyBuilder();
+
+Fields fields = new Fields("key", "message");
+FixedBatchSpout spout = new FixedBatchSpout(fields, 4,
+    new Values("storm", "1"),
+    new Values("trident", "1"),
+    new Values("needs", "1"),
+    new Values("javadoc", "1")
+);
+spout.setCycle(true);
+builder.setSpout("spout", spout, 5);
+//set producer properties.
+Properties props = new Properties();
+props.put("bootstrap.servers", "localhost:9092");
+props.put("acks", "1");
+props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+
+KafkaBolt bolt = new KafkaBolt()
+    .withProducerProperties(props)
+    .withTopicSelector(new DefaultTopicSelector("test"))
+    .withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper());
+builder.setBolt("forwardToKafka", bolt, 8).shuffleGrouping("spout");
+
+Config conf = new Config();
+
+StormSubmitter.submitTopology("kafkaboltTest", conf, builder.createTopology());
+```
+
+For Trident:
+
+```java
+Fields fields = new Fields("word", "count");
+FixedBatchSpout spout = new FixedBatchSpout(fields, 4,
+    new Values("storm", "1"),
+    new Values("trident", "1"),
+    new Values("needs", "1"),
+    new Values("javadoc", "1")
+);
+spout.setCycle(true);
+
+TridentTopology topology = new TridentTopology();
+Stream stream = topology.newStream("spout1", spout);
+
+//set producer properties.
+Properties props = new Properties();
+props.put("bootstrap.servers", "localhost:9092");
+props.put("acks", "1");
+props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+
+TridentKafkaStateFactory stateFactory = new TridentKafkaStateFactory()
+    .withProducerProperties(props)
+    .withKafkaTopicSelector(new DefaultTopicSelector("test"))
+    .withTridentTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper("word", "count"));
+stream.partitionPersist(stateFactory, fields, new TridentKafkaStateUpdater(), new Fields());
+
+Config conf = new Config();
+StormSubmitter.submitTopology("kafkaTridentTest", conf, topology.build());
+```
+
+## Reading from Kafka (Spouts)
+
+### Configuration
+
+The spout implementations are configured by use of the `KafkaSpoutConfig` class. This class uses a Builder pattern and can be started either by calling one of
+the Builder's constructors or by calling the static `builder` method in the KafkaSpoutConfig class.
+
+The constructor or static method used to create the builder requires a few key values (which can be changed later on); these are the minimum configuration needed to start
+a spout.
+
+`bootstrapServers` is the same as the Kafka Consumer Property "bootstrap.servers".
+`topics` is the set of topics the spout will consume: either a `Collection` of specific topic names (one or more) or a regular expression `Pattern`, which specifies
+that any topics that match that regular expression will be consumed.
+
+If you are using the Builder constructors instead of one of the `builder` methods, you will also need to specify a key deserializer and a value deserializer. This is to help guarantee type safety through the use
+of Java generics. The deserializers can be specified via the consumer properties set with `setProp`. See the KafkaConsumer configuration documentation for details.
+
+There are a few key configs to pay attention to.
+
+`setFirstPollOffsetStrategy` allows you to set where to start consuming data from. This is used both in case of failure recovery and when starting the spout
+for the first time. The allowed values are listed in the [FirstPollOffsetStrategy javadocs](javadocs/org/apache/storm/kafka/spout/KafkaSpoutConfig.FirstPollOffsetStrategy.html).
+
+`setProcessingGuarantee` lets you configure what processing guarantees the spout will provide. This affects how soon consumed offsets can be committed, and the frequency of commits. See the [ProcessingGuarantee javadoc](javadocs/org/apache/storm/kafka/spout/KafkaSpoutConfig.ProcessingGuarantee.html) for details.
+
+`setRecordTranslator` allows you to modify how the spout converts a Kafka Consumer Record into a Tuple, and which stream that tuple will be published into.
+By default the "topic", "partition", "offset", "key", and "value" will be emitted to the "default" stream. If you want to output entries to different
+streams based on the topic, Storm provides `ByTopicRecordTranslator`. See below for more examples on how to use these.
+
+`setProp` and `setProps` can be used to set KafkaConsumer properties. The list of these properties can be found in the KafkaConsumer configuration documentation on the [Kafka website](http://kafka.apache.org/documentation.html#consumerconfigs). Note that KafkaConsumer autocommit is unsupported. The KafkaSpoutConfig constructor will throw an exception if the "enable.auto.commit" property is set, and the consumer used by the spout will always have that property set to false. You can configure similar behavior to autocommit through the `setProcessingGuarantee` method on the KafkaSpoutConfig builder.
+
+### Usage Examples
+
+#### Create a Simple Insecure Spout
+The following will consume all events published to "topic" and send them to MyBolt with the fields "topic", "partition", "offset", "key", "value".
+
+```java
+
+final TopologyBuilder tp = new TopologyBuilder();
+tp.setSpout("kafka_spout", new KafkaSpout<>(KafkaSpoutConfig.builder("127.0.0.1:" + port, "topic").build()), 1);
+tp.setBolt("bolt", new MyBolt()).shuffleGrouping("kafka_spout");
+...
+```
+
+#### Wildcard Topics
+Wildcard topics will consume from all topics that exist in the specified brokers list and match the pattern. So in the following example
+"topic", "topic_foo" and "topic_bar" will all match the pattern "topic.*", but "not_my_topic" would not match.
+
+```java
+
+final TopologyBuilder tp = new TopologyBuilder();
+tp.setSpout("kafka_spout", new KafkaSpout<>(KafkaSpoutConfig.builder("127.0.0.1:" + port, Pattern.compile("topic.*")).build()), 1);
+tp.setBolt("bolt", new MyBolt()).shuffleGrouping("kafka_spout");
+...
+```
+
+#### Multiple Streams
+
+```java
+
+final TopologyBuilder tp = new TopologyBuilder();
+
+//By default all topics not covered by another rule, but consumed by the spout, will be emitted to "STREAM_1" as "topic", "key", and "value"
+ByTopicRecordTranslator<String, String> byTopic = new ByTopicRecordTranslator<>(
+    (r) -> new Values(r.topic(), r.key(), r.value()),
+    new Fields("topic", "key", "value"), "STREAM_1");
+//For topic_2 all events will be emitted to "STREAM_2" as just "key" and "value"
+byTopic.forTopic("topic_2", (r) -> new Values(r.key(), r.value()), new Fields("key", "value"), "STREAM_2");
+
+tp.setSpout("kafka_spout", new KafkaSpout<>(KafkaSpoutConfig.builder("127.0.0.1:" + port, "topic_1", "topic_2", "topic_3").build()), 1);
+tp.setBolt("bolt", new MyBolt()).shuffleGrouping("kafka_spout", "STREAM_1");
+tp.setBolt("another", new MyOtherBolt()).shuffleGrouping("kafka_spout", "STREAM_2");
+...
+```
+
+#### Trident
+
+```java
+final TridentTopology tridentTopology = new TridentTopology();
+final Stream spoutStream = tridentTopology.newStream("kafkaSpout",
+    new KafkaTridentSpoutOpaque<>(KafkaSpoutConfig.builder("127.0.0.1:" + port, Pattern.compile("topic.*")).build()))
+        .parallelismHint(1)
+...
+```
+
+Trident does not support multiple streams and will ignore any streams set for output. If, however, the Fields are not identical for each
+output topic, it will throw an exception rather than continue.
+
+#### Example topologies
+Example topologies using storm-kafka-client can be found in the examples/storm-kafka-client-examples directory included in the Storm source or binary distributions.
+
+### Custom RecordTranslators (ADVANCED)
+
+In most cases the built-in SimpleRecordTranslator and ByTopicRecordTranslator should cover your use case. If you run into a situation where you need a custom one,
+this section describes how to do that properly, along with some of the less obvious classes involved.
+
+The point of `apply` is to take a ConsumerRecord and turn it into a `List<Object>` that can be emitted. What is not obvious is how to tell the spout to emit it to a
+specific stream. To do this you will need to return an instance of `org.apache.storm.kafka.spout.KafkaTuple`. This provides a method `routedTo` that will say which
+specific stream the tuple should go to.
+
+For example:
+
+```java
+return new KafkaTuple(1, 2, 3, 4).routedTo("bar");
+```
+
+This will cause the tuple to be emitted on the "bar" stream.
+
+Be careful when writing custom record translators because, just like a Storm spout, they need to be self-consistent. The `streams` method should return
+the full set of streams that this translator will ever try to emit on. Additionally, `getFieldsFor` should return a valid Fields object for each of those
+streams. If you are doing this for Trident, a value must be present in the List returned by `apply` for every field in the Fields object for that stream;
+otherwise Trident can throw exceptions.
+
+
+### Manual Partition Assignment (ADVANCED)
+
+By default the KafkaSpout instances will be assigned partitions using a round-robin strategy. If you need to customize partition assignment, you must implement the `ManualPartitioner` interface. You can pass your implementation to the `KafkaSpoutConfig.Builder` constructor.
Please take care when supplying a custom implementation, since an incorrect `ManualPartitioner` implementation could leave some partitions unread, or concurrently read by multiple spout instances. See the `RoundRobinManualPartitioner` for an example of how to implement this functionality.
+
+### Manual partition discovery
+
+You can customize how the spout discovers existing partitions by implementing the `TopicFilter` interface. Storm-kafka-client ships with a few implementations. Like `ManualPartitioner`, you can pass your implementation to the `KafkaSpoutConfig.Builder` constructor. Note that the `TopicFilter` is only responsible for discovering partitions; deciding which of the discovered partitions to subscribe to is the responsibility of `ManualPartitioner`.
+
+## Using storm-kafka-client with different versions of Kafka
+
+Storm-kafka-client's Kafka dependency is defined as `provided` scope in Maven, meaning it will not be pulled in
+as a transitive dependency. This allows you to use a Kafka dependency version compatible with your Kafka cluster.
+
+When building a project with storm-kafka-client, you must explicitly add the Kafka clients dependency. For example, to
+use kafka-clients 0.10.0.0, you would use the following dependency in your `pom.xml`:
+
+```xml
+<dependency>
+    <groupId>org.apache.kafka</groupId>
+    <artifactId>kafka-clients</artifactId>
+    <version>0.10.0.0</version>
+</dependency>
+```
+
+You can also override the kafka-clients version while building from Maven, with the parameter `storm.kafka.client.version`,
+e.g. `mvn clean install -Dstorm.kafka.client.version=0.10.0.0`
+
+When selecting a Kafka client version, you should ensure that:
+ 1. The Kafka API is compatible. The storm-kafka-client module only supports Kafka **0.10 or newer**. For older versions,
+ you can use the storm-kafka module (https://github.com/apache/storm/tree/1.x-branch/external/storm-kafka).
+ 2. The Kafka client version you select is wire-compatible with the broker. Please see the [Kafka compatibility matrix](https://cwiki.apache.org/confluence/display/KAFKA/Compatibility+Matrix).
+
+# Kafka Spout Performance Tuning
+
+The Kafka spout provides two internal parameters to control its performance. The parameters can be set using the [setOffsetCommitPeriodMs](javadocs/org/apache/storm/kafka/spout/KafkaSpoutConfig.Builder.html#setOffsetCommitPeriodMs-long-) and [setMaxUncommittedOffsets](javadocs/org/apache/storm/kafka/spout/KafkaSpoutConfig.Builder.html#setMaxUncommittedOffsets-int-) methods.
+
+* "offset.commit.period.ms" controls how often the spout commits to Kafka
+* "max.uncommitted.offsets" controls how many offsets can be pending commit before another poll can take place
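+
+For instance, both can be set on the config builder when constructing the spout (the broker address and values here are arbitrary placeholders):
+
+```java
+KafkaSpoutConfig<String, String> spoutConf = KafkaSpoutConfig
+    .builder("localhost:9092", "topic")
+    // Commit consumed offsets back to Kafka every 10 seconds.
+    .setOffsetCommitPeriodMs(10_000)
+    // Allow at most 250,000 offsets pending commit before polling pauses.
+    .setMaxUncommittedOffsets(250_000)
+    .build();
+```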
+
+The [Kafka consumer config](http://kafka.apache.org/documentation.html#consumerconfigs) parameters may also have an impact on the performance of the spout. The following Kafka parameters are likely the most influential in the spout performance:
+
+* `fetch.min.bytes`
+* `fetch.max.wait.ms`
+* the [Kafka Consumer](http://kafka.apache.org/090/javadoc/index.html?org/apache/kafka/clients/consumer/KafkaConsumer.html) instance poll timeout, which is specified for each Kafka spout using the [setPollTimeoutMs](javadocs/org/apache/storm/kafka/spout/KafkaSpoutConfig.Builder.html#setPollTimeoutMs-long-) method.
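+
+As a rough sketch, these could be tuned together on the builder (values here are illustrative, not recommendations):
+
+```java
+KafkaSpoutConfig<String, String> tunedConf = KafkaSpoutConfig
+    .builder("localhost:9092", "topic")
+    // Consumer properties, passed through to the underlying KafkaConsumer.
+    .setProp("fetch.min.bytes", 1048576)
+    .setProp("fetch.max.wait.ms", 1000)
+    // Maximum time a single call to poll() may block waiting for records.
+    .setPollTimeoutMs(200)
+    .build();
+```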
+
+Depending on the structure of your Kafka cluster, the distribution of the data, and the availability of data to poll, these parameters will have to be configured appropriately. Please refer to the Kafka documentation on Kafka parameter tuning.
+
+### Default values
+
+Currently the Kafka spout has the following default values, which have been shown to give good performance in the test environment described in this [blog post](https://hortonworks.com/blog/microbenchmarking-storm-1-0-performance/):
+
+* poll.timeout.ms = 200
+* offset.commit.period.ms = 30000 (30s)
+* max.uncommitted.offsets = 10000000
+
+# Tuple Tracking
+
+By default the spout only tracks emitted tuples when the processing guarantee is AT_LEAST_ONCE. It may be necessary to track
+emitted tuples with other processing guarantees to benefit from Storm features such as showing complete latency in the UI,
+or enabling backpressure with Config.TOPOLOGY_MAX_SPOUT_PENDING.
+
+```java
+KafkaSpoutConfig<String, String> kafkaConf = KafkaSpoutConfig
+    .builder(bootstrapServers, topics)
+    .setProcessingGuarantee(ProcessingGuarantee.AT_MOST_ONCE)
+    .setTupleTrackingEnforced(true)
+    .build();
+```
+
+Note: This setting has no effect with AT_LEAST_ONCE processing guarantee, where tuple tracking is required and therefore always enabled.
+
+# Mapping from `storm-kafka` to `storm-kafka-client` spout properties
+
+This may not be an exhaustive list because the `storm-kafka` configs were taken from Storm 0.9.6
+[SpoutConfig](https://svn.apache.org/repos/asf/storm/site/releases/0.9.6/javadocs/storm/kafka/SpoutConfig.html) and
+[KafkaConfig](https://svn.apache.org/repos/asf/storm/site/releases/0.9.6/javadocs/storm/kafka/KafkaConfig.html).
+`storm-kafka-client` spout configurations were taken from Storm 1.0.6
+[KafkaSpoutConfig](https://storm.apache.org/releases/1.0.6/javadocs/org/apache/storm/kafka/spout/KafkaSpoutConfig.html)
+and Kafka 0.10.1.0 [ConsumerConfig](https://kafka.apache.org/0101/javadoc/index.html?org/apache/kafka/clients/consumer/ConsumerConfig.html).
+
+| SpoutConfig | KafkaSpoutConfig/ConsumerConfig | KafkaSpoutConfig Usage |
+| ------------- | ------------------------------- | ---------------------- |
+| **Setting:** `startOffsetTime` <br> **Default:** `EarliestTime` <br> ________________________________________________ <br> **Setting:** `forceFromStart` <br> **Default:** `false` <br><br> `startOffsetTime` & `forceFromStart` together determine the starting offset. `forceFromStart` determines whether the Zookeeper offset is ignored. `startOffsetTime` sets the timestamp that determines the beginning offset, in case there is no offset in Zookeeper, or the Zookeeper offset is ignored | **Setting:** [`FirstPollOffsetStrategy`](javadocs/org/apache/storm/kafka/spout/KafkaSpoutConfig.FirstPollOffsetStrategy.html) <br> **Default:** `UNCOMMITTED_EARLIEST` <br><br> [Refer to the helper table](#helper-table-for-setting-firstpolloffsetstrategy) for picking `FirstPollOffsetStrategy` based on your `startOffsetTime` & `forceFromStart` settings | [`.setFirstPollOffsetStrategy()`](javadocs/org/apache/storm/kafka/spout/KafkaSpoutConfig.Builder.html#setFirstPollOffsetStrategy-org.apache.storm.kafka.spout.KafkaSpoutConfig.FirstPollOffsetStrategy-) |
+| **Setting:** `scheme` <br><br> The interface that specifies how a `ByteBuffer` from a Kafka topic is transformed into a Storm tuple <br> **Default:** `RawMultiScheme` | **Setting:** [`Deserializers`](https://kafka.apache.org/11/javadoc/org/apache/kafka/common/serialization/Deserializer.html) | [`.setProp(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, )`](javadocs/org/apache/storm/kafka/spout/KafkaSpoutConfig.Builder.html#setProp-java.lang.String-java.lang.Object-) <br><br> [`.setProp(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, )`](javadocs/org/apache/storm/kafka/spout/KafkaSpoutConfig.Builder.html#setProp-java.lang.String-java.lang.Object-) |
+| **Setting:** `fetchSizeBytes` <br><br> Message fetch size -- the number of bytes to attempt to fetch in one request to a Kafka server <br> **Default:** `1MB` | **Setting:** [`max.partition.fetch.bytes`](http://kafka.apache.org/10/documentation.html#newconsumerconfigs) | [`.setProp(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, )`](javadocs/org/apache/storm/kafka/spout/KafkaSpoutConfig.Builder.html#setProp-java.lang.String-java.lang.Object-) |
+| **Setting:** `bufferSizeBytes` <br><br> Buffer size (in bytes) for network requests; the buffer size the consumer has for pulling data from the producer <br> **Default:** `1MB` | **Setting:** [`receive.buffer.bytes`](http://kafka.apache.org/10/documentation.html#newconsumerconfigs) | [`.setProp(ConsumerConfig.RECEIVE_BUFFER_CONFIG, )`](javadocs/org/apache/storm/kafka/spout/KafkaSpoutConfig.Builder.html#setProp-java.lang.String-java.lang.Object-) |
+| **Setting:** `socketTimeoutMs` <br><br> **Default:** `10000` | **N/A** | |
+| **Setting:** `useStartOffsetTimeIfOffsetOutOfRange` <br><br> **Default:** `true` | **Setting:** [`auto.offset.reset`](http://kafka.apache.org/10/documentation.html#newconsumerconfigs) <br><br> **Default:** Note that the default value for `auto.offset.reset` is `earliest` if you have [`ProcessingGuarantee`](javadocs/org/apache/storm/kafka/spout/KafkaSpoutConfig.ProcessingGuarantee.html) set to `AT_LEAST_ONCE`, but the default value is `latest` otherwise. | [`.setProp(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, )`](javadocs/org/apache/storm/kafka/spout/KafkaSpoutConfig.Builder.html#setProp-java.lang.String-java.lang.Object-) |
+| **Setting:** `fetchMaxWait` <br><br> Maximum time in ms to wait for the response <br> **Default:** `10000` | **Setting:** [`fetch.max.wait.ms`](http://kafka.apache.org/10/documentation.html#newconsumerconfigs) | [`.setProp(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, )`](javadocs/org/apache/storm/kafka/spout/KafkaSpoutConfig.Builder.html#setProp-java.lang.String-java.lang.Object-) |
+| **Setting:** `maxOffsetBehind` <br><br> Specifies how long a spout attempts to retry the processing of a failed tuple. For example, when a failing tuple's offset is more than `maxOffsetBehind` behind the acked offset, the spout stops retrying the tuple. <br> **Default:** `LONG.MAX_VALUE` | **N/A** | |
+| **Setting:** `clientId` | **Setting:** [`client.id`](http://kafka.apache.org/10/documentation.html#newconsumerconfigs) | [`.setProp(ConsumerConfig.CLIENT_ID_CONFIG, )`](javadocs/org/apache/storm/kafka/spout/KafkaSpoutConfig.Builder.html#setProp-java.lang.String-java.lang.Object-) |
+
+If you are using this table to upgrade your topology to use `storm-kafka-client` instead of `storm-kafka`, then you will also need to migrate the consumer offsets from ZooKeeper to the Kafka broker. Use the [`storm-kafka-migration`](https://github.com/apache/storm/tree/master/external/storm-kafka-migration) tool to migrate the Kafka consumer offsets.
+
+#### Helper table for setting `FirstPollOffsetStrategy`
+
+Pick and set `FirstPollOffsetStrategy` based on `startOffsetTime` & `forceFromStart` settings:
+
+| `startOffsetTime` | `forceFromStart` | `FirstPollOffsetStrategy` |
+| -------------------- | ---------------- | ------------------------- |
+| `EarliestTime` | `true` | `EARLIEST` |
+| `EarliestTime` | `false` | `UNCOMMITTED_EARLIEST` |
+| `LatestTime` | `true` | `LATEST` |
+| `LatestTime` | `false` | `UNCOMMITTED_LATEST` |
diff --git a/docs/storm-metricstore.md b/docs/storm-metricstore.md
new file mode 100644
index 00000000000..cafc2b50204
--- /dev/null
+++ b/docs/storm-metricstore.md
@@ -0,0 +1,86 @@
+---
+title: Storm Metricstore
+layout: documentation
+documentation: true
+---
+A metric store ([`MetricStore`]({{page.git-blob-base}}/storm-server/src/main/java/org/apache/storm/metricstore/MetricStore.java)) interface was added
+to Nimbus to allow storing metric information ([`Metric`]({{page.git-blob-base}}/storm-server/src/main/java/org/apache/storm/metricstore/Metric.java))
+to a database. The default implementation
+([`RocksDbStore`]({{page.git-blob-base}}/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbStore.java)) uses RocksDB,
+a key-value store.
+
+As metrics are stored in RocksDB, their string values (for topology ID and executor ID, etc.) are converted to unique integer IDs, and these strings
+are also stored to the database as metadata indexed by the integer ID. When a metric is stored, it is also aggregated with any existing metric
+within the same 1, 10, and 60 minute timeframe.
+
+The [`FilterOptions`]({{page.git-blob-base}}/storm-server/src/main/java/org/apache/storm/metricstore/FilterOptions.java) class provides an interface
+to select which options can be used to scan the metrics.
+
+
+### Configuration
+
+The following configuration options exist:
+
+```yaml
+storm.metricstore.class: "org.apache.storm.metricstore.rocksdb.RocksDbStore"
+storm.metricprocessor.class: "org.apache.storm.metricstore.NimbusMetricProcessor"
+storm.metricstore.rocksdb.location: "storm_rocks"
+storm.metricstore.rocksdb.create_if_missing: true
+storm.metricstore.rocksdb.metadata_string_cache_capacity: 4000
+storm.metricstore.rocksdb.retention_hours: 240
+```
+
+* storm.metricstore.class is the class that implements [`MetricStore`]({{page.git-blob-base}}/storm-server/src/main/java/org/apache/storm/metricstore/MetricStore.java).
+* storm.metricprocessor.class is the class that implements [`WorkerMetricsProcessor`]({{page.git-blob-base}}/storm-server/src/main/java/org/apache/storm/metricstore/WorkerMetricsProcessor.java).
+* storm.metricstore.rocksdb.location provides the location of the RocksDB database on Nimbus.
+* storm.metricstore.rocksdb.create_if_missing permits creating a RocksDB database if one is missing.
+* storm.metricstore.rocksdb.metadata_string_cache_capacity controls the number of metadata strings cached in memory.
+* storm.metricstore.rocksdb.retention_hours sets the length of time metrics will remain active.
+
+
+### RocksDB Schema
+
+The RocksDB Key (represented by [`RocksDbKey`]({{page.git-blob-base}}/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbKey.java))
+fields are as follows:
+
+
+| Field | Size | Offset | Description |
+|-------------------|------|--------|--------------------------------------------------------------------------------------------------------------|
+| Type | 1 | 0 | The type maps to the KeyType enum, specifying a metric or various types of metadata strings |
+| Aggregation Level | 1 | 1 | The aggregation level for a metric (see AggLevel enum). Set to 0 for metadata. |
+| Topology Id | 4 | 2 | The metadata string Id representing a topologyId for a metric, or the unique string Id for a metadata string |
+| Timestamp | 8 | 6 | The timestamp for a metric, unused for metadata |
+| Metric Id | 4 | 14 | The metadata string Id for the metric name |
+| Component Id | 4 | 18 | The metadata string Id for the component Id |
+| Executor Id | 4 | 22 | The metadata string Id for the executor Id |
+| Host Id | 4 | 26 | The metadata string Id for the host Id |
+| Port | 4 | 30 | The port number |
+| Stream Id | 4 | 34 | The metadata string Id for the stream Id |
+
+
+The RocksDB Value fields for metadata strings (represented by
+[`RocksDbValue`]({{page.git-blob-base}}/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbValue.java)) are as follows:
+
+
+| Field | Size | Offset | Description |
+|-----------------|------|--------|----------------------------------------------------------------------------------------|
+| Version | 1 | 0 | The current metadata version - allows migrating if the format changes in the future |
+| Timestamp | 8 | 1 | The time when the metadata was last used by a metric. Allows deleting of old metadata. |
+| Metadata String | any | 9 | The metadata string |
+
+
+RocksDB Value fields for metric data are as follows:
+
+| Field | Size | Offset | Description |
+|---------|------|--------|-----------------------------------------------------------------------------------|
+| Version | 1 | 0 | The current metric version - allows migrating if the format changes in the future |
+| Value | 8 | 1 | The metric value |
+| Count | 8 | 9 | The metric count |
+| Min | 8 | 17 | The minimum metric value |
+| Max | 8 | 25 | The maximum metric value |
+| Sum | 8 | 33 | The sum of the metric values |
+
+
+
diff --git a/docs/storm-redis.md b/docs/storm-redis.md
new file mode 100644
index 00000000000..f77f6d6ec59
--- /dev/null
+++ b/docs/storm-redis.md
@@ -0,0 +1,273 @@
+---
+title: Storm Redis Integration
+layout: documentation
+documentation: true
+---
+
+Storm/Trident integration for [Redis](http://redis.io/)
+
+Storm-redis uses Jedis as its Redis client.
+
+## Usage
+
+### How do I use it?
+
+Use it as a Maven dependency:
+
+```xml
+<dependency>
+    <groupId>org.apache.storm</groupId>
+    <artifactId>storm-redis</artifactId>
+    <version>${storm.version}</version>
+    <type>jar</type>
+</dependency>
+```
+
+### For normal Bolt
+
+Storm-redis provides three basic Bolt implementations: `RedisLookupBolt`, `RedisStoreBolt`, and `RedisFilterBolt`.
+
+As the names suggest, `RedisLookupBolt` retrieves a value from Redis using a key, `RedisStoreBolt` stores a key/value pair to Redis, and `RedisFilterBolt` filters out tuples whose key or field doesn't exist in Redis.
+
+One tuple is matched to one key/value pair, and you define the mapping via a `TupleMapper`.
+
+You can also choose the Redis data type to use from `RedisDataTypeDescription`. Please refer to `RedisDataTypeDescription.RedisDataType` to see which data types are supported. Some data types (hash and sorted set, and set for `RedisFilterBolt` only) require an additional key; in that case, the key converted from the tuple becomes the element.
+
+These concepts are captured by the `RedisLookupMapper`, `RedisStoreMapper`, and `RedisFilterMapper` interfaces, which fit `RedisLookupBolt`, `RedisStoreBolt`, and `RedisFilterBolt` respectively.
+(When implementing `RedisFilterMapper`, be sure to have `declareOutputFields()` declare the same fields as the input stream, since the filter bolt forwards input tuples as-is when they exist in Redis.)
+
+#### RedisLookupBolt example
+
+```java
+class WordCountRedisLookupMapper implements RedisLookupMapper {
+    private RedisDataTypeDescription description;
+    private final String hashKey = "wordCount";
+
+    public WordCountRedisLookupMapper() {
+        description = new RedisDataTypeDescription(
+            RedisDataTypeDescription.RedisDataType.HASH, hashKey);
+    }
+
+    @Override
+    public List<Values> toTuple(ITuple input, Object value) {
+        String member = getKeyFromTuple(input);
+        List<Values> values = Lists.newArrayList();
+        values.add(new Values(member, value));
+        return values;
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("wordName", "count"));
+    }
+
+    @Override
+    public RedisDataTypeDescription getDataTypeDescription() {
+        return description;
+    }
+
+    @Override
+    public String getKeyFromTuple(ITuple tuple) {
+        return tuple.getStringByField("word");
+    }
+
+    @Override
+    public String getValueFromTuple(ITuple tuple) {
+        return null;
+    }
+}
+```
+
+```java
+JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()
+        .setHost(host).setPort(port).build();
+RedisLookupMapper lookupMapper = new WordCountRedisLookupMapper();
+RedisLookupBolt lookupBolt = new RedisLookupBolt(poolConfig, lookupMapper);
+```
+
+#### RedisFilterBolt example
+
+```java
+class BlacklistWordFilterMapper implements RedisFilterMapper {
+    private RedisDataTypeDescription description;
+    private final String setKey = "blacklist";
+
+    public BlacklistWordFilterMapper() {
+        description = new RedisDataTypeDescription(
+            RedisDataTypeDescription.RedisDataType.SET, setKey);
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("word", "count"));
+    }
+
+    @Override
+    public RedisDataTypeDescription getDataTypeDescription() {
+        return description;
+    }
+
+    @Override
+    public String getKeyFromTuple(ITuple tuple) {
+        return tuple.getStringByField("word");
+    }
+
+    @Override
+    public String getValueFromTuple(ITuple tuple) {
+        return null;
+    }
+}
+```
+
+```java
+JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()
+        .setHost(host).setPort(port).build();
+RedisFilterMapper filterMapper = new BlacklistWordFilterMapper();
+RedisFilterBolt filterBolt = new RedisFilterBolt(poolConfig, filterMapper);
+```
+
+#### RedisStoreBolt example
+
+```java
+class WordCountStoreMapper implements RedisStoreMapper {
+    private RedisDataTypeDescription description;
+    private final String hashKey = "wordCount";
+
+    public WordCountStoreMapper() {
+        description = new RedisDataTypeDescription(
+            RedisDataTypeDescription.RedisDataType.HASH, hashKey);
+    }
+
+    @Override
+    public RedisDataTypeDescription getDataTypeDescription() {
+        return description;
+    }
+
+    @Override
+    public String getKeyFromTuple(ITuple tuple) {
+        return tuple.getStringByField("word");
+    }
+
+    @Override
+    public String getValueFromTuple(ITuple tuple) {
+        return tuple.getStringByField("count");
+    }
+}
+```
+
+```java
+JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()
+        .setHost(host).setPort(port).build();
+RedisStoreMapper storeMapper = new WordCountStoreMapper();
+RedisStoreBolt storeBolt = new RedisStoreBolt(poolConfig, storeMapper);
+```
+
+### For non-simple Bolt
+
+If your scenario doesn't fit `RedisStoreBolt`, `RedisLookupBolt`, or `RedisFilterBolt`, storm-redis also provides `AbstractRedisBolt`, which you can extend to apply your own business logic.
+
+```java
+    public static class LookupWordTotalCountBolt extends AbstractRedisBolt {
+        private static final Logger LOG = LoggerFactory.getLogger(LookupWordTotalCountBolt.class);
+        private static final Random RANDOM = new Random();
+
+        public LookupWordTotalCountBolt(JedisPoolConfig config) {
+            super(config);
+        }
+
+        public LookupWordTotalCountBolt(JedisClusterConfig config) {
+            super(config);
+        }
+
+        @Override
+        public void execute(Tuple input) {
+            JedisCommands jedisCommands = null;
+            try {
+                jedisCommands = getInstance();
+                String wordName = input.getStringByField("word");
+                String countStr = jedisCommands.get(wordName);
+                if (countStr != null) {
+                    int count = Integer.parseInt(countStr);
+                    this.collector.emit(new Values(wordName, count));
+
+                    // print lookup result with low probability
+                    if (RANDOM.nextInt(1000) > 995) {
+                        LOG.info("Lookup result - word : " + wordName + " / count : " + count);
+                    }
+                } else {
+                    // skip
+                    LOG.warn("Word not found in Redis - word : " + wordName);
+                }
+            } finally {
+                if (jedisCommands != null) {
+                    returnInstance(jedisCommands);
+                }
+                this.collector.ack(input);
+            }
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            // wordName, count
+            declarer.declare(new Fields("wordName", "count"));
+        }
+    }
+```
+
+### Trident State usage
+
+1. `RedisState` and `RedisMapState`, which provide the Jedis interface for a single Redis server.
+
+2. `RedisClusterState` and `RedisClusterMapState`, which provide the JedisCluster interface for a Redis cluster.
+
+RedisState
+
+```java
+    JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()
+        .setHost(redisHost).setPort(redisPort)
+        .build();
+    RedisStoreMapper storeMapper = new WordCountStoreMapper();
+    RedisLookupMapper lookupMapper = new WordCountLookupMapper();
+    RedisState.Factory factory = new RedisState.Factory(poolConfig);
+
+    TridentTopology topology = new TridentTopology();
+    Stream stream = topology.newStream("spout1", spout);
+
+    stream.partitionPersist(factory,
+        fields,
+        new RedisStateUpdater(storeMapper).withExpire(86400000),
+        new Fields());
+
+    TridentState state = topology.newStaticState(factory);
+    stream = stream.stateQuery(state, new Fields("word"),
+        new RedisStateQuerier(lookupMapper),
+        new Fields("columnName","columnValue"));
+```
+
+RedisClusterState
+
+```java
+    Set<InetSocketAddress> nodes = new HashSet<>();
+    for (String hostPort : redisHostPort.split(",")) {
+        String[] host_port = hostPort.split(":");
+        nodes.add(new InetSocketAddress(host_port[0], Integer.valueOf(host_port[1])));
+    }
+    JedisClusterConfig clusterConfig = new JedisClusterConfig.Builder().setNodes(nodes)
+        .build();
+    RedisStoreMapper storeMapper = new WordCountStoreMapper();
+    RedisLookupMapper lookupMapper = new WordCountLookupMapper();
+    RedisClusterState.Factory factory = new RedisClusterState.Factory(clusterConfig);
+
+    TridentTopology topology = new TridentTopology();
+    Stream stream = topology.newStream("spout1", spout);
+
+    stream.partitionPersist(factory,
+        fields,
+        new RedisClusterStateUpdater(storeMapper).withExpire(86400000),
+        new Fields());
+
+    TridentState state = topology.newStaticState(factory);
+    stream = stream.stateQuery(state, new Fields("word"),
+        new RedisClusterStateQuerier(lookupMapper),
+        new Fields("columnName","columnValue"));
+```
diff --git a/docs/windows-users-guide.md b/docs/windows-users-guide.md
new file mode 100644
index 00000000000..9c9a8506ab5
--- /dev/null
+++ b/docs/windows-users-guide.md
@@ -0,0 +1,32 @@
+---
+title: Windows Users Guide
+layout: documentation
+documentation: true
+---
+
+This page describes how to set up the environment for Apache Storm on Windows.
+
+## Symbolic Link
+
+Starting with 1.0.0, Apache Storm uses symbolic links to aggregate the log directory and resource directory into the worker directory.
+Unfortunately, creating a symbolic link on Windows requires a non-default privilege, so users must grant it manually to make sure Storm processes can create symbolic links at runtime.
+On some Windows editions (e.g. non-Professional), granting the symbolic link privilege through a security policy is not possible since the policy tool is not installed.
+
+When creating a symbolic link is not possible, the Supervisor process will stop as soon as it tries to start workers, since the permission exception is considered a fatal error.
+
+The following Microsoft TechNet pages describe how to grant that policy to the account Storm runs as.
+
+* [How to Configure Security Policy Settings](https://technet.microsoft.com/en-us/library/dn452420.aspx)
+* [Create symbolic links](https://technet.microsoft.com/en-us/library/dn221947.aspx)
+
+One tricky point: the `administrator` group already has this privilege, but it is only activated when a process is run as the `administrator` account.
+So if your account belongs to the `administrator` group (and you don't want to change that), you may want to open a command prompt with `run as administrator` and execute the processes from that console.
+If you don't want to execute Storm processes directly from a command prompt, launch them with `runas /user:administrator` so that they run under the administrator account.
+
+Starting with Windows 10 Creators Update, it is possible to activate a Developer Mode that supports creating symbolic links without `run as administrator`:
+[Symlinks in Windows 10!](https://blogs.windows.com/buildingapps/2016/12/02/symlinks-windows-10/)
+
+Alternatively you can disable usage of symbolic links by setting the config `storm.disable.symlinks` to `true`
+on Nimbus and all of the Supervisor nodes. This will also disable features that require symlinks. Currently the only such feature is downloading
+dependent blobs, but this may change in the future. Some topologies may rely on symbolic links to resources in the current working directory of the worker that are
+created as a convenience, so it is not a 100% backwards-compatible change.
diff --git a/examples/storm-hdfs-examples/pom.xml b/examples/storm-hdfs-examples/pom.xml
new file mode 100644
index 00000000000..8933ab552eb
--- /dev/null
+++ b/examples/storm-hdfs-examples/pom.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <artifactId>storm</artifactId>
+        <groupId>org.apache.storm</groupId>
+        <version>2.8.4-SNAPSHOT</version>
+        <relativePath>../../pom.xml</relativePath>
+    </parent>
+
+    <artifactId>storm-hdfs-examples</artifactId>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-client</artifactId>
+            <version>${project.version}</version>
+            <scope>${provided.scope}</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-hdfs</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.yaml</groupId>
+            <artifactId>snakeyaml</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <configuration>
+                    <createDependencyReducedPom>true</createDependencyReducedPom>
+                    <filters>
+                        <filter>
+                            <artifact>*:*</artifact>
+                            <excludes>
+                                <exclude>META-INF/*.SF</exclude>
+                                <exclude>META-INF/*.sf</exclude>
+                                <exclude>META-INF/*.DSA</exclude>
+                                <exclude>META-INF/*.dsa</exclude>
+                                <exclude>META-INF/*.RSA</exclude>
+                                <exclude>META-INF/*.rsa</exclude>
+                                <exclude>META-INF/*.EC</exclude>
+                                <exclude>META-INF/*.ec</exclude>
+                                <exclude>META-INF/MSFTSIG.SF</exclude>
+                                <exclude>META-INF/MSFTSIG.RSA</exclude>
+                            </excludes>
+                        </filter>
+                    </filters>
+                </configuration>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-checkstyle-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-pmd-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/bolt/HdfsFileTopology.java b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/bolt/HdfsFileTopology.java
new file mode 100644
index 00000000000..01a446c9353
--- /dev/null
+++ b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/bolt/HdfsFileTopology.java
@@ -0,0 +1,190 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */ + +package org.apache.storm.hdfs.bolt; + +import java.io.FileInputStream; +import java.io.InputStream; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.hdfs.bolt.format.DefaultFileNameFormat; +import org.apache.storm.hdfs.bolt.format.DelimitedRecordFormat; +import org.apache.storm.hdfs.bolt.format.FileNameFormat; +import org.apache.storm.hdfs.bolt.format.RecordFormat; +import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; +import org.apache.storm.hdfs.bolt.rotation.TimedRotationPolicy; +import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; +import org.apache.storm.hdfs.bolt.sync.SyncPolicy; +import org.apache.storm.hdfs.common.rotation.MoveFileAction; +import org.apache.storm.spout.SpoutOutputCollector; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.topology.base.BaseRichBolt; +import org.apache.storm.topology.base.BaseRichSpout; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.yaml.snakeyaml.Yaml; + +public class HdfsFileTopology { + static final String SENTENCE_SPOUT_ID = "sentence-spout"; + static final String BOLT_ID = "my-bolt"; + static final String TOPOLOGY_NAME = "test-topology"; + + public static void main(String[] args) throws Exception { + Config config = new Config(); + config.setNumWorkers(1); + + SentenceSpout spout = new SentenceSpout(); + + // sync the filesystem after every 1k tuples + SyncPolicy syncPolicy = new CountSyncPolicy(1000); + + // rotate files when they reach 5MB + FileRotationPolicy rotationPolicy = new TimedRotationPolicy(1.0f, TimedRotationPolicy.TimeUnit.MINUTES); + + FileNameFormat fileNameFormat = new DefaultFileNameFormat() + .withPath("/tmp/foo/") + .withExtension(".txt"); + + // use "|" instead of "," for field delimiter + RecordFormat format = new DelimitedRecordFormat() + .withFieldDelimiter("|"); + + Yaml yaml = new Yaml(); + InputStream in = new FileInputStream(args[1]); + Map yamlConf = (Map) yaml.load(in); + in.close(); + config.put("hdfs.config", yamlConf); + + HdfsBolt bolt = new HdfsBolt() + .withConfigKey("hdfs.config") + .withFsUrl(args[0]) + .withFileNameFormat(fileNameFormat) + .withRecordFormat(format) + .withRotationPolicy(rotationPolicy) + .withSyncPolicy(syncPolicy) + .addRotationAction(new MoveFileAction().toDestination("/tmp/dest2/")); + + TopologyBuilder builder = new TopologyBuilder(); + + builder.setSpout(SENTENCE_SPOUT_ID, spout, 1); + // SentenceSpout --> MyBolt + builder.setBolt(BOLT_ID, bolt, 4) + .shuffleGrouping(SENTENCE_SPOUT_ID); + String topoName = TOPOLOGY_NAME; + if (args.length == 3) { + topoName = args[2]; + } else if (args.length > 3) { + System.out.println("Usage: HdfsFileTopology [hdfs url] [hdfs yaml config file] "); + return; + } + StormSubmitter.submitTopology(topoName, config, builder.createTopology()); + } + + public static void waitForSeconds(int seconds) { + try { + Thread.sleep(seconds * 1000); + } catch (InterruptedException e) { + //ignore + } + } + + public static class SentenceSpout extends BaseRichSpout { + private ConcurrentHashMap pending; + private SpoutOutputCollector collector; + private String[] sentences = { + "my dog has fleas", + "i like cold beverages", + 
"the dog ate my homework", + "don't have a cow man", + "i don't think i like fleas" + }; + private int index = 0; + private int count = 0; + private long total = 0L; + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("sentence", "timestamp")); + } + + @Override + public void open(Map config, TopologyContext context, + SpoutOutputCollector collector) { + this.collector = collector; + this.pending = new ConcurrentHashMap(); + } + + @Override + public void nextTuple() { + Values values = new Values(sentences[index], System.currentTimeMillis()); + UUID msgId = UUID.randomUUID(); + this.pending.put(msgId, values); + this.collector.emit(values, msgId); + index++; + if (index >= sentences.length) { + index = 0; + } + count++; + total++; + if (count > 20000) { + count = 0; + System.out.println("Pending count: " + this.pending.size() + ", total: " + this.total); + } + Thread.yield(); + } + + @Override + public void ack(Object msgId) { + this.pending.remove(msgId); + } + + @Override + public void fail(Object msgId) { + System.out.println("**** RESENDING FAILED TUPLE"); + this.collector.emit(this.pending.get(msgId), msgId); + } + } + + public static class MyBolt extends BaseRichBolt { + + private HashMap counts = null; + private OutputCollector collector; + + @Override + public void prepare(Map config, TopologyContext context, OutputCollector collector) { + this.counts = new HashMap(); + this.collector = collector; + } + + @Override + public void execute(Tuple tuple) { + collector.ack(tuple); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + // this bolt does not emit anything + } + + @Override + public void cleanup() { + } + } +} diff --git a/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/bolt/SequenceFileTopology.java b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/bolt/SequenceFileTopology.java new file mode 100644 index 00000000000..90e0aea0d42 --- /dev/null +++ b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/bolt/SequenceFileTopology.java @@ -0,0 +1,199 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +package org.apache.storm.hdfs.bolt; + +import java.io.FileInputStream; +import java.io.InputStream; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import org.apache.hadoop.io.SequenceFile; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.hdfs.bolt.format.DefaultFileNameFormat; +import org.apache.storm.hdfs.bolt.format.DefaultSequenceFormat; +import org.apache.storm.hdfs.bolt.format.FileNameFormat; +import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; +import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; +import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy.Units; +import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; +import org.apache.storm.hdfs.bolt.sync.SyncPolicy; +import org.apache.storm.hdfs.common.rotation.MoveFileAction; +import org.apache.storm.spout.SpoutOutputCollector; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.topology.base.BaseRichBolt; +import org.apache.storm.topology.base.BaseRichSpout; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.yaml.snakeyaml.Yaml; + +public class SequenceFileTopology { + static final String SENTENCE_SPOUT_ID = "sentence-spout"; + static final String BOLT_ID = "my-bolt"; + static final String TOPOLOGY_NAME = "test-topology"; + + public static void main(String[] args) throws Exception { + Config config = new Config(); + config.setNumWorkers(1); + + SentenceSpout spout = new SentenceSpout(); + + // sync the filesystem after every 1k tuples + SyncPolicy syncPolicy = new CountSyncPolicy(1000); + + // rotate files when they reach 5MB + FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB); + + FileNameFormat fileNameFormat = new DefaultFileNameFormat() + .withPath("/tmp/source/") + .withExtension(".seq"); + + // create sequence format instance. 
+        DefaultSequenceFormat format = new DefaultSequenceFormat("timestamp", "sentence");
+
+        Yaml yaml = new Yaml();
+        InputStream in = new FileInputStream(args[1]);
+        Map<String, Object> yamlConf = (Map<String, Object>) yaml.load(in);
+        in.close();
+        config.put("hdfs.config", yamlConf);
+
+        SequenceFileBolt bolt = new SequenceFileBolt()
+            .withFsUrl(args[0])
+            .withConfigKey("hdfs.config")
+            .withFileNameFormat(fileNameFormat)
+            .withSequenceFormat(format)
+            .withRotationPolicy(rotationPolicy)
+            .withSyncPolicy(syncPolicy)
+            .withCompressionType(SequenceFile.CompressionType.RECORD)
+            .withCompressionCodec("deflate")
+            .addRotationAction(new MoveFileAction().toDestination("/tmp/dest/"));
+
+        TopologyBuilder builder = new TopologyBuilder();
+
+        builder.setSpout(SENTENCE_SPOUT_ID, spout, 1);
+        // SentenceSpout --> MyBolt
+        builder.setBolt(BOLT_ID, bolt, 4)
+            .shuffleGrouping(SENTENCE_SPOUT_ID);
+
+        String topoName = TOPOLOGY_NAME;
+        if (args.length == 3) {
+            topoName = args[2];
+        } else if (args.length > 3) {
+            System.out.println("Usage: SequenceFileTopology [hdfs url] [hdfs yaml config file] [topology name]");
+            return;
+        }
+        StormSubmitter.submitTopology(topoName, config, builder.createTopology());
+    }
+
+    public static void waitForSeconds(int seconds) {
+        try {
+            Thread.sleep(seconds * 1000);
+        } catch (InterruptedException e) {
+            //ignore
+        }
+    }
+
+
+    public static class SentenceSpout extends BaseRichSpout {
+
+        // tuples emitted but not yet acked, kept so failed tuples can be resent
+        private ConcurrentHashMap<UUID, Values> pending;
+        private SpoutOutputCollector collector;
+        private String[] sentences = {
+            "my dog has fleas",
+            "i like cold beverages",
+            "the dog ate my homework",
+            "don't have a cow man",
+            "i don't think i like fleas"
+        };
+        private int index = 0;
+        private int count = 0;
+        private long total = 0L;
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("sentence", "timestamp"));
+        }
+
+        @Override
+        public void open(Map<String, Object> config, TopologyContext context,
+                         SpoutOutputCollector collector) {
+            this.collector = collector;
+            this.pending = new ConcurrentHashMap<UUID, Values>();
+        }
+
+        @Override
+        public void nextTuple() {
+            Values values = new Values(sentences[index], System.currentTimeMillis());
+            UUID msgId = UUID.randomUUID();
+            this.pending.put(msgId, values);
+            this.collector.emit(values, msgId);
+            index++;
+            if (index >= sentences.length) {
+                index = 0;
+            }
+            count++;
+            total++;
+            if (count > 20000) {
+                count = 0;
+                System.out.println("Pending count: " + this.pending.size() + ", total: " + this.total);
+            }
+            Thread.yield();
+        }
+
+        @Override
+        public void ack(Object msgId) {
+            // System.out.println("ACK");
+            this.pending.remove(msgId);
+        }
+
+        @Override
+        public void fail(Object msgId) {
+            System.out.println("**** RESENDING FAILED TUPLE");
+            this.collector.emit(this.pending.get(msgId), msgId);
+        }
+    }
+
+
+    public static class MyBolt extends BaseRichBolt {
+
+        private HashMap<String, Long> counts = null;
+        private OutputCollector collector;
+
+        @Override
+        public void prepare(Map<String, Object> config, TopologyContext context, OutputCollector collector) {
+            this.counts = new HashMap<String, Long>();
+            this.collector = collector;
+        }
+
+        @Override
+        public void execute(Tuple tuple) {
+            collector.ack(tuple);
+        }
+
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            // this bolt does not emit anything
+        }
+
+        @Override
+        public void cleanup() {
+        }
+    }
+}
diff --git a/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/spout/HdfsSpoutTopology.java b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/spout/HdfsSpoutTopology.java
new file mode
100644 index 00000000000..01d44832a49 --- /dev/null +++ b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/spout/HdfsSpoutTopology.java @@ -0,0 +1,176 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.spout; + +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.generated.ClusterSummary; +import org.apache.storm.generated.ExecutorSummary; +import org.apache.storm.generated.KillOptions; +import org.apache.storm.generated.Nimbus; +import org.apache.storm.generated.SpoutStats; +import org.apache.storm.generated.TopologyInfo; +import org.apache.storm.generated.TopologySummary; +import org.apache.storm.metric.LoggingMetricsConsumer; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.topology.base.BaseRichBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.utils.NimbusClient; +import org.apache.storm.utils.Utils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +public class HdfsSpoutTopology { + + public static final String SPOUT_ID = "hdfsspout"; + public static final String BOLT_ID = "constbolt"; + + /** + * Copies text file content from sourceDir to destinationDir. 
Moves source files into archiveDir after it is done consuming them.
+ */
+    public static void main(String[] args) throws Exception {
+        // 0 - validate args
+        if (args.length < 7) {
+            System.err.println("Please check command line arguments.");
+            System.err.println("Usage :");
+            System.err.println(
+                HdfsSpoutTopology.class.toString() + " topologyName hdfsUri fileFormat sourceDir archiveDir badDir spoutCount");
+            System.err.println(" topologyName - topology name.");
+            System.err.println(" hdfsUri - hdfs name node URI");
+            System.err.println(" fileFormat - Set to 'TEXT' for reading text files or 'SEQ' for sequence files.");
+            System.err.println(" sourceDir - read files from this HDFS dir using HdfsSpout.");
+            System.err.println(" archiveDir - after a file in sourceDir is read completely, it is moved to this HDFS location.");
+            System.err.println(" badDir - files that cannot be read properly will be moved to this HDFS location.");
+            System.err.println(" spoutCount - Num of spout instances.");
+            System.err.println();
+            System.exit(-1);
+        }
+
+        // 1 - parse cmd line args
+        String hdfsUri = args[1];
+        String fileFormat = args[2];
+        String sourceDir = args[3];
+        String archiveDir = args[4];
+        String badDir = args[5];
+
+        // 2 - Create and configure topology
+        Config conf = new Config();
+        conf.setNumWorkers(1);
+        conf.setNumAckers(1);
+        conf.setMaxTaskParallelism(1);
+        conf.setDebug(true);
+        conf.registerMetricsConsumer(LoggingMetricsConsumer.class);
+
+        TopologyBuilder builder = new TopologyBuilder();
+        HdfsSpout spout = new HdfsSpout().withOutputFields(TextFileReader.defaultFields)
+            .setReaderType(fileFormat)
+            .setHdfsUri(hdfsUri)
+            .setSourceDir(sourceDir)
+            .setArchiveDir(archiveDir)
+            .setBadFilesDir(badDir);
+        int spoutNum = Integer.parseInt(args[6]);
+        builder.setSpout(SPOUT_ID, spout, spoutNum);
+        ConstBolt bolt = new ConstBolt();
+        builder.setBolt(BOLT_ID, bolt, 1).shuffleGrouping(SPOUT_ID);
+
+        // 3 - submit topology, wait for a few min and terminate it
+        Map<String, Object> clusterConf = Utils.readStormConfig();
+        String topologyName = args[0];
+        StormSubmitter.submitTopologyWithProgressBar(topologyName, conf, builder.createTopology());
+        Nimbus.Iface client = NimbusClient.Builder.withConf(clusterConf).build().getClient();
+
+        // 4 - Print metrics every 30 sec, kill topology after 20 min
+        for (int i = 0; i < 40; i++) {
+            Thread.sleep(30 * 1000);
+            printMetrics(client, topologyName);
+        }
+        kill(client, topologyName);
+    } // main
+
+    private static void kill(Nimbus.Iface client, String topologyName) throws Exception {
+        KillOptions opts = new KillOptions();
+        opts.set_wait_secs(0);
+        client.killTopologyWithOpts(topologyName, opts);
+    }
+
+    static void printMetrics(Nimbus.Iface client, String name) throws Exception {
+        TopologyInfo info = client.getTopologyInfoByName(name);
+        int uptime = info.get_uptime_secs();
+        long acked = 0;
+        long failed = 0;
+        double weightedAvgTotal = 0.0;
+        for (ExecutorSummary exec : info.get_executors()) {
+            if ("spout".equals(exec.get_component_id())) {
+                SpoutStats stats = exec.get_stats().get_specific().get_spout();
+                Map<String, Long> failedMap = stats.get_failed().get(":all-time");
+                Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
+                Map<String, Double> avgLatMap = stats.get_complete_ms_avg().get(":all-time");
+                for (String key : ackedMap.keySet()) {
+                    if (failedMap != null) {
+                        Long tmp = failedMap.get(key);
+                        if (tmp != null) {
+                            failed += tmp;
+                        }
+                    }
+                    long ackVal = ackedMap.get(key);
+                    double latVal = avgLatMap.get(key) * ackVal;
+                    acked += ackVal;
+                    weightedAvgTotal += latVal;
+                }
+            }
+        }
+        double avgLatency = weightedAvgTotal / acked;
+        System.out.println("uptime: " + uptime
+                + " acked: " + acked
+                + " avgLatency: " + avgLatency
+                + " acked/sec: " + (((double) acked) / uptime)
+                + " failed: " + failed);
+    }
+
+    public static class ConstBolt extends BaseRichBolt {
+        public static final String FIELDS = "message";
+        private static final long serialVersionUID = -5313598399155365865L;
+        private static final Logger log = LoggerFactory.getLogger(ConstBolt.class);
+        int count = 0;
+        private OutputCollector collector;
+
+        public ConstBolt() {
+        }
+
+        @Override
+        public void prepare(Map<String, Object> conf, TopologyContext context, OutputCollector collector) {
+            this.collector = collector;
+        }
+
+        @Override
+        public void execute(Tuple tuple) {
+            log.info("Received tuple : {}", tuple.getValue(0));
+            count++;
+            if (count == 3) {
+                collector.fail(tuple);
+            } else {
+                collector.ack(tuple);
+            }
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields(FIELDS));
+        }
+    } // class
+}
diff --git a/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/trident/FixedBatchSpout.java b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/trident/FixedBatchSpout.java
new file mode 100644
index 00000000000..29793f1d18d
--- /dev/null
+++ b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/trident/FixedBatchSpout.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.hdfs.trident;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.storm.Config;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.spout.IBatchSpout;
+import org.apache.storm.tuple.Fields;
+
+public class FixedBatchSpout implements IBatchSpout {
+
+    Fields fields;
+    List<Object>[] outputs;
+    int maxBatchSize;
+    HashMap<Long, List<List<Object>>> batches = new HashMap<Long, List<List<Object>>>();
+    int index = 0;
+    boolean cycle = false;
+
+    public FixedBatchSpout(Fields fields, int maxBatchSize, List<Object>... outputs) {
+        this.fields = fields;
+        this.outputs = outputs;
+        this.maxBatchSize = maxBatchSize;
+    }
+
+    public void setCycle(boolean cycle) {
+        this.cycle = cycle;
+    }
+
+    @Override
+    public void open(Map<String, Object> conf, TopologyContext context) {
+        index = 0;
+    }
+
+    @Override
+    public void emitBatch(long batchId, TridentCollector collector) {
+        List<List<Object>> batch = this.batches.get(batchId);
+        if (batch == null) {
+            batch = new ArrayList<List<Object>>();
+            if (index >= outputs.length && cycle) {
+                index = 0;
+            }
+            for (int i = 0; i < maxBatchSize; index++, i++) {
+                if (index == outputs.length) {
+                    index = 0;
+                }
+                batch.add(outputs[index]);
+            }
+            this.batches.put(batchId, batch);
+        }
+        for (List<Object> list : batch) {
+            collector.emit(list);
+        }
+    }
+
+    @Override
+    public void ack(long batchId) {
+        this.batches.remove(batchId);
+    }
+
+    @Override
+    public void close() {
+    }
+
+    @Override
+    public Map<String, Object> getComponentConfiguration() {
+        Config conf = new Config();
+        conf.setMaxTaskParallelism(1);
+        return conf;
+    }
+
+    @Override
+    public Fields getOutputFields() {
+        return fields;
+    }
+}
diff --git a/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/trident/TridentFileTopology.java b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/trident/TridentFileTopology.java
new file mode 100644
index 00000000000..b722497183c
--- /dev/null
+++ b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/trident/TridentFileTopology.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.hdfs.trident;
+
+import java.io.FileInputStream;
+import java.io.InputStream;
+import java.util.Map;
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.hdfs.trident.format.DefaultFileNameFormat;
+import org.apache.storm.hdfs.trident.format.DelimitedRecordFormat;
+import org.apache.storm.hdfs.trident.format.FileNameFormat;
+import org.apache.storm.hdfs.trident.format.RecordFormat;
+import org.apache.storm.hdfs.trident.rotation.FileRotationPolicy;
+import org.apache.storm.hdfs.trident.rotation.FileSizeRotationPolicy;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.state.StateFactory;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.yaml.snakeyaml.Yaml;
+
+public class TridentFileTopology {
+
+    public static StormTopology buildTopology(String hdfsUrl) {
+        FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence", "key"), 1000, new Values("the cow jumped over the moon", 1L),
+            new Values("the man went to the store and bought some candy", 2L),
+            new Values("four score and seven years ago", 3L),
+            new Values("how many apples can you eat", 4L),
+            new Values("to be or not to be the person", 5L));
+        spout.setCycle(true);
+
+        TridentTopology topology = new TridentTopology();
+        Stream stream = topology.newStream("spout1", spout);
+
+        Fields hdfsFields = new Fields("sentence", "key");
+
+        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
+            .withPath("/tmp/trident")
+            .withPrefix("trident")
+            .withExtension(".txt");
+
+        RecordFormat recordFormat = new DelimitedRecordFormat()
+            .withFields(hdfsFields);
+
+        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);
+
+        HdfsState.Options options = new HdfsState.HdfsFileOptions()
+            .withFileNameFormat(fileNameFormat)
+            .withRecordFormat(recordFormat)
+            .withRotationPolicy(rotationPolicy)
+            .withFsUrl(hdfsUrl)
+            .withConfigKey("hdfs.config");
+
+        StateFactory factory = new HdfsStateFactory().withOptions(options);
+
+        TridentState state = stream
+            .partitionPersist(factory, hdfsFields, new HdfsUpdater(), new Fields());
+
+        return topology.build();
+    }
+
+    public static void main(String[] args) throws Exception {
+        Config conf = new Config();
+        conf.setMaxSpoutPending(5);
+
+        Yaml yaml = new Yaml();
+        InputStream in = new FileInputStream(args[1]);
+        Map<String, Object> yamlConf = (Map<String, Object>) yaml.load(in);
+        in.close();
+        conf.put("hdfs.config", yamlConf);
+        String topoName = "wordCounter";
+        if (args.length == 3) {
+            topoName = args[2];
+        } else if (args.length > 3) {
+            System.out.println("Usage: TridentFileTopology [hdfs url] [hdfs yaml config file] <topology name>");
+            return;
+        }
+        conf.setNumWorkers(3);
+        StormSubmitter.submitTopology(topoName, conf, buildTopology(args[0]));
+    }
+}
diff --git a/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/trident/TridentSequenceTopology.java b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/trident/TridentSequenceTopology.java
new file mode 100644
index 00000000000..74d1d5c8dc5
--- /dev/null
+++ b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/trident/TridentSequenceTopology.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.hdfs.trident;
+
+import java.io.FileInputStream;
+import java.io.InputStream;
+import java.util.Map;
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.hdfs.common.rotation.MoveFileAction;
+import org.apache.storm.hdfs.trident.format.DefaultFileNameFormat;
+import org.apache.storm.hdfs.trident.format.DefaultSequenceFormat;
+import org.apache.storm.hdfs.trident.format.FileNameFormat;
+import org.apache.storm.hdfs.trident.rotation.FileRotationPolicy;
+import org.apache.storm.hdfs.trident.rotation.FileSizeRotationPolicy;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.state.StateFactory;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.yaml.snakeyaml.Yaml;
+
+public class TridentSequenceTopology {
+
+    public static StormTopology buildTopology(String hdfsUrl) {
+        FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence", "key"), 1000, new Values("the cow jumped over the moon", 1L),
+            new Values("the man went to the store and bought some candy", 2L),
+            new Values("four score and seven years ago", 3L),
+            new Values("how many apples can you eat", 4L),
+            new Values("to be or not to be the person", 5L));
+        spout.setCycle(true);
+
+        TridentTopology topology = new TridentTopology();
+        Stream stream = topology.newStream("spout1", spout);
+
+        Fields hdfsFields = new Fields("sentence", "key");
+
+        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
+            .withPath("/tmp/trident")
+            .withPrefix("trident")
+            .withExtension(".seq");
+
+        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);
+
+        HdfsState.Options seqOpts = new HdfsState.SequenceFileOptions()
+            .withFileNameFormat(fileNameFormat)
+            .withSequenceFormat(new DefaultSequenceFormat("key", "sentence"))
+            .withRotationPolicy(rotationPolicy)
+            .withFsUrl(hdfsUrl)
+            .withConfigKey("hdfs.config")
+            .addRotationAction(new MoveFileAction().toDestination("/tmp/dest2/"));
+        StateFactory factory = new HdfsStateFactory().withOptions(seqOpts);
+
+        TridentState state = stream
+            .partitionPersist(factory, hdfsFields, new HdfsUpdater(), new Fields());
+
+        return topology.build();
+    }
+
+    public static void main(String[] args) throws Exception {
+        Config conf = new Config();
+        conf.setMaxSpoutPending(5);
+
+        Yaml yaml = new Yaml();
+        InputStream in = new FileInputStream(args[1]);
+        Map<String, Object> yamlConf = (Map<String, Object>) yaml.load(in);
+        in.close();
+        conf.put("hdfs.config", yamlConf);
+        String topoName = "wordCounter";
+        if (args.length == 3) {
+            topoName = args[2];
+        } else if (args.length > 3) {
+            System.out.println("Usage: TridentSequenceTopology [hdfs url] [hdfs yaml config file] <topology name>");
+            return;
+        }
+
+        conf.setNumWorkers(3);
+        StormSubmitter.submitTopology(topoName, conf, buildTopology(args[0]));
+    }
+}
diff --git a/examples/storm-jdbc-examples/pom.xml b/examples/storm-jdbc-examples/pom.xml
new file mode 100644
index 00000000000..60fba3ec086
--- /dev/null
+++ b/examples/storm-jdbc-examples/pom.xml
@@ -0,0 +1,96 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <artifactId>storm</artifactId>
+        <groupId>org.apache.storm</groupId>
+        <version>2.8.4-SNAPSHOT</version>
+        <relativePath>../../pom.xml</relativePath>
+    </parent>
+
+    <artifactId>storm-jdbc-examples</artifactId>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-client</artifactId>
+            <version>${project.version}</version>
+            <scope>${provided.scope}</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-jdbc</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <configuration>
+                    <createDependencyReducedPom>true</createDependencyReducedPom>
+                    <filters>
+                        <filter>
+                            <artifact>*:*</artifact>
+                            <excludes>
+                                <exclude>META-INF/*.SF</exclude>
+                                <exclude>META-INF/*.sf</exclude>
+                                <exclude>META-INF/*.DSA</exclude>
+                                <exclude>META-INF/*.dsa</exclude>
+                                <exclude>META-INF/*.RSA</exclude>
+                                <exclude>META-INF/*.rsa</exclude>
+                                <exclude>META-INF/*.EC</exclude>
+                                <exclude>META-INF/*.ec</exclude>
+                                <exclude>META-INF/MSFTSIG.SF</exclude>
+                                <exclude>META-INF/MSFTSIG.RSA</exclude>
+                            </excludes>
+                        </filter>
+                    </filters>
+                </configuration>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-checkstyle-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-pmd-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/examples/storm-jdbc-examples/src/main/java/org/apache/storm/jdbc/spout/UserSpout.java b/examples/storm-jdbc-examples/src/main/java/org/apache/storm/jdbc/spout/UserSpout.java
new file mode 100644
index 00000000000..6854722191d
--- /dev/null
+++ b/examples/storm-jdbc-examples/src/main/java/org/apache/storm/jdbc/spout/UserSpout.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.jdbc.spout;
+
+import com.google.common.collect.Lists;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.IRichSpout;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+
+public class UserSpout implements IRichSpout {
+    boolean isDistributed;
+    SpoutOutputCollector collector;
+    // Reuse a single Random rather than allocating one per emitted tuple.
+    private final Random rand = new Random();
+    public static final List<Values> rows = Lists.newArrayList(
+        new Values(1, "peter", System.currentTimeMillis()),
+        new Values(2, "bob", System.currentTimeMillis()),
+        new Values(3, "alice", System.currentTimeMillis()));
+
+    public UserSpout() {
+        this(true);
+    }
+
+    public UserSpout(boolean isDistributed) {
+        this.isDistributed = isDistributed;
+    }
+
+    public boolean isDistributed() {
+        return this.isDistributed;
+    }
+
+    @Override
+    public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) {
+        this.collector = collector;
+    }
+
+    @Override
+    public void close() {
+    }
+
+    @Override
+    public void nextTuple() {
+        // Pick any of the rows; the bound must be rows.size() (not rows.size() - 1),
+        // otherwise the last row would never be emitted.
+        final Values row = rows.get(rand.nextInt(rows.size()));
+        this.collector.emit(row);
+        Thread.yield();
+    }
+
+    @Override
+    public void ack(Object msgId) {
+    }
+
+    @Override
+    public void fail(Object msgId) {
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("user_id", "user_name", "create_date"));
+    }
+
+    @Override
+    public void activate() {
+    }
+
+    @Override
+    public void deactivate() {
+    }
+
+    @Override
+    public Map<String, Object> getComponentConfiguration() {
+        return null;
+    }
+}
diff --git a/examples/storm-jdbc-examples/src/main/java/org/apache/storm/jdbc/topology/AbstractUserTopology.java b/examples/storm-jdbc-examples/src/main/java/org/apache/storm/jdbc/topology/AbstractUserTopology.java
new file mode 100644
index 00000000000..aa7f68609a2
--- /dev/null
+++ b/examples/storm-jdbc-examples/src/main/java/org/apache/storm/jdbc/topology/AbstractUserTopology.java
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.jdbc.topology;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import java.sql.Types;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.jdbc.common.Column;
+import org.apache.storm.jdbc.common.ConnectionProvider;
+import org.apache.storm.jdbc.common.HikariCPConnectionProvider;
+import org.apache.storm.jdbc.common.JdbcClient;
+import org.apache.storm.jdbc.mapper.JdbcLookupMapper;
+import org.apache.storm.jdbc.mapper.JdbcMapper;
+import org.apache.storm.jdbc.mapper.SimpleJdbcLookupMapper;
+import org.apache.storm.jdbc.mapper.SimpleJdbcMapper;
+import org.apache.storm.jdbc.spout.UserSpout;
+import org.apache.storm.tuple.Fields;
+
+/**
+ * Common setup code shared by the JDBC example topologies (for topology-related code reuse).
+ */
+public abstract class AbstractUserTopology {
+    private static final List<String> setupSqls = Lists.newArrayList(
+        "drop table if exists user",
+        "drop table if exists department",
+        "drop table if exists user_department",
+        "create table if not exists user (user_id integer, user_name varchar(100), dept_name varchar(100), create_date date)",
+        "create table if not exists department (dept_id integer, dept_name varchar(100))",
+        "create table if not exists user_department (user_id integer, dept_id integer)",
+        "insert into department values (1, 'R&D')",
+        "insert into department values (2, 'Finance')",
+        "insert into department values (3, 'HR')",
+        "insert into department values (4, 'Sales')",
+        "insert into user_department values (1, 1)",
+        "insert into user_department values (2, 2)",
+        "insert into user_department values (3, 3)",
+        "insert into user_department values (4, 4)"
+    );
+    protected UserSpout userSpout;
+    protected JdbcMapper jdbcMapper;
+    protected JdbcLookupMapper jdbcLookupMapper;
+    protected ConnectionProvider connectionProvider;
+
+    protected static final String TABLE_NAME = "user";
+    protected static final String JDBC_CONF = "jdbc.conf";
+    protected static final String SELECT_QUERY = "select dept_name from department, "
+        + "user_department where department.dept_id = user_department.dept_id "
+        + "and user_department.user_id = ?";
+
+    /**
+     * A main method template to extend.
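+     * Parses the JDBC connection arguments, runs the setup SQL against the configured database,
+     * and submits the topology returned by {@link #getTopology()}.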
+     * @param args main method arguments
+     * @throws Exception any exception occurring during cluster setup or operation
+     */
+    public void execute(String[] args) throws Exception {
+        if (args.length != 4 && args.length != 5) {
+            System.out.println("Usage: " + this.getClass().getSimpleName()
+                + " <dataSourceClassName> <dataSourceUrl> <user> <password> [topology name]");
+            System.exit(-1);
+        }
+        Map<String, Object> map = Maps.newHashMap();
+        map.put("dataSourceClassName", args[0]); //com.mysql.jdbc.jdbc2.optional.MysqlDataSource
+        map.put("dataSource.url", args[1]); //jdbc:mysql://localhost/test
+        map.put("dataSource.user", args[2]); //root
+        map.put("dataSource.password", args[3]); //password
+
+        Config config = new Config();
+        config.put(JDBC_CONF, map);
+
+        ConnectionProvider connectionProvider = new HikariCPConnectionProvider(map);
+        connectionProvider.prepare();
+        int queryTimeoutSecs = 60;
+        JdbcClient jdbcClient = new JdbcClient(connectionProvider, queryTimeoutSecs);
+        for (String sql : setupSqls) {
+            jdbcClient.executeSql(sql);
+        }
+
+        this.userSpout = new UserSpout();
+        this.jdbcMapper = new SimpleJdbcMapper(TABLE_NAME, connectionProvider);
+        connectionProvider.cleanup();
+        Fields outputFields = new Fields("user_id", "user_name", "dept_name", "create_date");
+        List<Column> queryParamColumns = Lists.newArrayList(new Column("user_id", Types.INTEGER));
+        this.jdbcLookupMapper = new SimpleJdbcLookupMapper(outputFields, queryParamColumns);
+        this.connectionProvider = new HikariCPConnectionProvider(map);
+        String topoName = "test";
+        if (args.length > 4) {
+            topoName = args[4];
+        }
+        StormSubmitter.submitTopology(topoName, config, getTopology());
+    }
+
+    public abstract StormTopology getTopology();
+
+}
diff --git a/examples/storm-jdbc-examples/src/main/java/org/apache/storm/jdbc/topology/UserPersistenceTopology.java b/examples/storm-jdbc-examples/src/main/java/org/apache/storm/jdbc/topology/UserPersistenceTopology.java
new file mode 100644
index 00000000000..7d4129341f6
--- /dev/null
+++ b/examples/storm-jdbc-examples/src/main/java/org/apache/storm/jdbc/topology/UserPersistenceTopology.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.jdbc.topology;
+
+import com.google.common.collect.Lists;
+
+import java.sql.Types;
+import java.util.List;
+
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.jdbc.bolt.JdbcInsertBolt;
+import org.apache.storm.jdbc.bolt.JdbcLookupBolt;
+import org.apache.storm.jdbc.common.Column;
+import org.apache.storm.jdbc.mapper.JdbcMapper;
+import org.apache.storm.jdbc.mapper.SimpleJdbcMapper;
+import org.apache.storm.topology.TopologyBuilder;
+
+
+public class UserPersistenceTopology extends AbstractUserTopology {
+    private static final String USER_SPOUT = "USER_SPOUT";
+    private static final String LOOKUP_BOLT = "LOOKUP_BOLT";
+    private static final String PERSISTENCE_BOLT = "PERSISTENCE_BOLT";
+
+    public static void main(String[] args) throws Exception {
+        new UserPersistenceTopology().execute(args);
+    }
+
+    @Override
+    public StormTopology getTopology() {
+        JdbcLookupBolt departmentLookupBolt = new JdbcLookupBolt(connectionProvider, SELECT_QUERY, this.jdbcLookupMapper);
+
+        // Must specify the column schema when providing a custom query.
+        List<Column> schemaColumns = Lists.newArrayList(new Column("create_date", Types.DATE),
+            new Column("dept_name", Types.VARCHAR),
+            new Column("user_id", Types.INTEGER),
+            new Column("user_name", Types.VARCHAR));
+        JdbcMapper mapper = new SimpleJdbcMapper(schemaColumns);
+
+        JdbcInsertBolt userPersistenceBolt = new JdbcInsertBolt(connectionProvider, mapper)
+            .withInsertQuery("insert into user (create_date, dept_name, user_id, user_name) values (?,?,?,?)");
+
+        // userSpout ==> lookupBolt ==> persistenceBolt
+        TopologyBuilder builder = new TopologyBuilder();
+
+        builder.setSpout(USER_SPOUT, this.userSpout, 1);
+        builder.setBolt(LOOKUP_BOLT, departmentLookupBolt, 1).shuffleGrouping(USER_SPOUT);
+        builder.setBolt(PERSISTENCE_BOLT, userPersistenceBolt, 1).shuffleGrouping(LOOKUP_BOLT);
+        return builder.createTopology();
+    }
+}
diff --git a/examples/storm-jdbc-examples/src/main/java/org/apache/storm/jdbc/topology/UserPersistenceTridentTopology.java b/examples/storm-jdbc-examples/src/main/java/org/apache/storm/jdbc/topology/UserPersistenceTridentTopology.java
new file mode 100644
index 00000000000..1c38c351efd
--- /dev/null
+++ b/examples/storm-jdbc-examples/src/main/java/org/apache/storm/jdbc/topology/UserPersistenceTridentTopology.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.storm.jdbc.topology; + +import com.google.common.collect.Lists; + +import java.sql.Types; + +import org.apache.storm.generated.StormTopology; +import org.apache.storm.jdbc.common.Column; +import org.apache.storm.jdbc.mapper.SimpleJdbcLookupMapper; +import org.apache.storm.jdbc.spout.UserSpout; +import org.apache.storm.jdbc.trident.state.JdbcQuery; +import org.apache.storm.jdbc.trident.state.JdbcState; +import org.apache.storm.jdbc.trident.state.JdbcStateFactory; +import org.apache.storm.jdbc.trident.state.JdbcUpdater; +import org.apache.storm.trident.Stream; +import org.apache.storm.trident.TridentState; +import org.apache.storm.trident.TridentTopology; +import org.apache.storm.tuple.Fields; + +public class UserPersistenceTridentTopology extends AbstractUserTopology { + + public static void main(String[] args) throws Exception { + new UserPersistenceTridentTopology().execute(args); + } + + @Override + public StormTopology getTopology() { + TridentTopology topology = new TridentTopology(); + + JdbcState.Options options = new JdbcState.Options() + .withConnectionProvider(connectionProvider) + .withMapper(this.jdbcMapper) + .withJdbcLookupMapper(new SimpleJdbcLookupMapper(new Fields("dept_name"), + Lists.newArrayList(new Column("user_id", Types.INTEGER)))) + .withTableName(TABLE_NAME) + .withSelectQuery(SELECT_QUERY); + + JdbcStateFactory jdbcStateFactory = new JdbcStateFactory(options); + + Stream stream = topology.newStream("userSpout", new UserSpout()); + TridentState state = topology.newStaticState(jdbcStateFactory); + stream = stream.stateQuery(state, + new Fields("user_id", "user_name", "create_date"), + new JdbcQuery(), + new Fields("dept_name")); + stream.partitionPersist(jdbcStateFactory, + new Fields("user_id", "user_name", "dept_name", "create_date"), + new JdbcUpdater(), + new Fields()); + return topology.build(); + } +} diff --git a/examples/storm-jms-examples/README.markdown b/examples/storm-jms-examples/README.markdown new file mode 100644 index 00000000000..7a4d8f07f5a --- /dev/null +++ b/examples/storm-jms-examples/README.markdown @@ -0,0 +1,12 @@ +## About Storm JMS Examples +This project contains a simple storm topology that illustrates the usage of "storm-jms". 
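+
+For orientation, the heart of the example (`ExampleJmsTopology`, included in this module) wires a JMS queue into a Storm spout roughly as follows; this is a sketch using the Spring context file `jms-activemq.xml` and the bean names the example defines:
+
+```java
+import jakarta.jms.Session;
+import org.apache.storm.jms.JmsProvider;
+import org.apache.storm.jms.spout.JmsSpout;
+
+// Obtain the connection factory and destination beans from the Spring
+// context, then point a JmsSpout at that queue.
+JmsProvider queueProvider = new SpringJmsProvider(
+        "jms-activemq.xml", "jmsConnectionFactory", "notificationQueue");
+
+JmsSpout queueSpout = new JmsSpout();
+queueSpout.setJmsProvider(queueProvider);
+queueSpout.setJmsTupleProducer(new JsonTupleProducer());
+queueSpout.setJmsAcknowledgeMode(Session.CLIENT_ACKNOWLEDGE);
+```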
+
+To build:
+
+`mvn clean install`
+
+The default build will create, in the `target` directory, a jar file that can be deployed to a Storm cluster:
+
+`storm-jms-examples-<version>-jar-with-dependencies.jar`
+
+
diff --git a/examples/storm-jms-examples/pom.xml b/examples/storm-jms-examples/pom.xml
new file mode 100644
index 00000000000..3149b1b6f2f
--- /dev/null
+++ b/examples/storm-jms-examples/pom.xml
@@ -0,0 +1,115 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <artifactId>storm</artifactId>
+        <groupId>org.apache.storm</groupId>
+        <version>2.8.4-SNAPSHOT</version>
+        <relativePath>../../pom.xml</relativePath>
+    </parent>
+
+    <artifactId>storm-jms-examples</artifactId>
+
+    <properties>
+        <spring.version>6.2.12</spring.version>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-beans</artifactId>
+            <version>${spring.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-core</artifactId>
+            <version>${spring.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-context</artifactId>
+            <version>${spring.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-jms</artifactId>
+            <version>${spring.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.xbean</groupId>
+            <artifactId>xbean-spring</artifactId>
+            <version>4.28</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-client</artifactId>
+            <version>${project.version}</version>
+            <scope>${provided.scope}</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-jms</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.activemq</groupId>
+            <artifactId>activemq-client</artifactId>
+            <version>${activemq.version}</version>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <artifactId>maven-assembly-plugin</artifactId>
+                <configuration>
+                    <descriptorRefs>
+                        <descriptorRef>jar-with-dependencies</descriptorRef>
+                    </descriptorRefs>
+                </configuration>
+                <executions>
+                    <execution>
+                        <id>make-assembly</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>single</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-checkstyle-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-pmd-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/examples/storm-jms-examples/src/main/java/org/apache/storm/jms/example/ExampleJmsTopology.java b/examples/storm-jms-examples/src/main/java/org/apache/storm/jms/example/ExampleJmsTopology.java
new file mode 100644
index 00000000000..aea290b752c
--- /dev/null
+++ b/examples/storm-jms-examples/src/main/java/org/apache/storm/jms/example/ExampleJmsTopology.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.jms.example;
+
+import jakarta.jms.JMSException;
+import jakarta.jms.Message;
+import jakarta.jms.Session;
+import jakarta.jms.TextMessage;
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.jms.JmsMessageProducer;
+import org.apache.storm.jms.JmsProvider;
+import org.apache.storm.jms.JmsTupleProducer;
+import org.apache.storm.jms.bolt.JmsBolt;
+import org.apache.storm.jms.spout.JmsSpout;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.ITuple;
+
+/**
+ * An example JMS topology.
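+ * Reads JSON messages from a JMS queue, passes them through intermediate bolts, republishes them
+ * to a JMS topic via a JmsBolt, and consumes that topic again with a second spout.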
+ */
+public class ExampleJmsTopology {
+    public static final String JMS_QUEUE_SPOUT = "JMS_QUEUE_SPOUT";
+    public static final String INTERMEDIATE_BOLT = "INTERMEDIATE_BOLT";
+    public static final String FINAL_BOLT = "FINAL_BOLT";
+    public static final String JMS_TOPIC_BOLT = "JMS_TOPIC_BOLT";
+    public static final String JMS_TOPIC_SPOUT = "JMS_TOPIC_SPOUT";
+    public static final String ANOTHER_BOLT = "ANOTHER_BOLT";
+
+    /**
+     * The main method.
+     * @param args takes the topology name as first argument
+     * @throws Exception any exception occurring during cluster setup or operation
+     */
+    @SuppressWarnings("serial")
+    public static void main(String[] args) throws Exception {
+
+        // JMS Queue Provider
+        JmsProvider jmsQueueProvider = new SpringJmsProvider(
+            "jms-activemq.xml", "jmsConnectionFactory",
+            "notificationQueue");
+
+        // JMS Producer
+        JmsTupleProducer producer = new JsonTupleProducer();
+
+        // JMS Queue Spout
+        JmsSpout queueSpout = new JmsSpout();
+        queueSpout.setJmsProvider(jmsQueueProvider);
+        queueSpout.setJmsTupleProducer(producer);
+        queueSpout.setJmsAcknowledgeMode(Session.CLIENT_ACKNOWLEDGE);
+
+        TopologyBuilder builder = new TopologyBuilder();
+
+        // spout with 5 parallel instances
+        builder.setSpout(JMS_QUEUE_SPOUT, queueSpout, 5);
+        // intermediate bolt, subscribes to jms spout, anchors on tuples, and auto-acks
+        builder.setBolt(INTERMEDIATE_BOLT,
+            new GenericBolt("INTERMEDIATE_BOLT", true, true, new Fields("json")), 3).shuffleGrouping(
+            JMS_QUEUE_SPOUT);
+
+        // bolt that subscribes to the intermediate bolt, and auto-acks
+        // messages.
+        builder.setBolt(FINAL_BOLT, new GenericBolt("FINAL_BOLT", true, true), 3).shuffleGrouping(
+            INTERMEDIATE_BOLT);
+
+        // JMS Topic provider
+        JmsProvider jmsTopicProvider = new SpringJmsProvider(
+            "jms-activemq.xml", "jmsConnectionFactory",
+            "notificationTopic");
+
+        // bolt that subscribes to the intermediate bolt, and publishes to a JMS Topic
+        JmsBolt jmsBolt = new JmsBolt();
+        jmsBolt.setJmsProvider(jmsTopicProvider);
+
+        // anonymous message producer just calls toString() on the tuple to create a jms message
+        jmsBolt.setJmsMessageProducer(new JmsMessageProducer() {
+            @Override
+            public Message toMessage(Session session, ITuple input) throws JMSException {
+                System.out.println("Sending JMS Message:" + input.toString());
+                TextMessage tm = session.createTextMessage(input.toString());
+                return tm;
+            }
+        });
+
+        builder.setBolt(JMS_TOPIC_BOLT, jmsBolt).shuffleGrouping(INTERMEDIATE_BOLT);
+
+        // JMS Topic spout
+        JmsSpout topicSpout = new JmsSpout();
+        topicSpout.setJmsProvider(jmsTopicProvider);
+        topicSpout.setJmsTupleProducer(producer);
+        topicSpout.setJmsAcknowledgeMode(Session.CLIENT_ACKNOWLEDGE);
+        topicSpout.setDistributed(false);
+
+        builder.setSpout(JMS_TOPIC_SPOUT, topicSpout);
+
+        builder.setBolt(ANOTHER_BOLT, new GenericBolt("ANOTHER_BOLT", true, true), 1).shuffleGrouping(
+            JMS_TOPIC_SPOUT);
+
+        Config conf = new Config();
+        String topoName = "storm-jms-example";
+        if (args.length > 0) {
+            topoName = args[0];
+        }
+        conf.setNumWorkers(3);
+
+        StormSubmitter.submitTopology(topoName, conf,
+            builder.createTopology());
+    }
+}
diff --git a/examples/storm-jms-examples/src/main/java/org/apache/storm/jms/example/GenericBolt.java b/examples/storm-jms-examples/src/main/java/org/apache/storm/jms/example/GenericBolt.java
new file mode 100644
index 00000000000..2137a25ee5b
--- /dev/null
+++ b/examples/storm-jms-examples/src/main/java/org/apache/storm/jms/example/GenericBolt.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache
Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.jms.example; + +import java.util.Map; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A generic org.apache.storm.topology.IRichBolt implementation + * for testing/debugging the Storm JMS Spout and example topologies. + * + *

<p>For debugging purposes, set the log level of the
+ * org.apache.storm.contrib.jms package to DEBUG for debugging
+ * output.</p>

+ * + * @author tgoetz + */ +@SuppressWarnings("serial") +public class GenericBolt extends BaseRichBolt { + private static final Logger LOG = LoggerFactory.getLogger(GenericBolt.class); + private OutputCollector collector; + private boolean autoAck = false; + private boolean autoAnchor = false; + private Fields declaredFields; + private String name; + + /** + * Constructs a new GenericBolt instance. + * + * @param name The name of the bolt (used in DEBUG logging) + * @param autoAck Whether or not this bolt should automatically acknowledge received tuples. + * @param autoAnchor Whether or not this bolt should automatically anchor to received tuples. + * @param declaredFields The fields this bolt declares as output. + */ + public GenericBolt(String name, boolean autoAck, boolean autoAnchor, Fields declaredFields) { + this.name = name; + this.autoAck = autoAck; + this.autoAnchor = autoAnchor; + this.declaredFields = declaredFields; + } + + public GenericBolt(String name, boolean autoAck, boolean autoAnchor) { + this(name, autoAck, autoAnchor, null); + } + + @Override + public void prepare(Map topoConf, TopologyContext context, + OutputCollector collector) { + this.collector = collector; + + } + + @Override + public void execute(Tuple input) { + LOG.debug("[" + this.name + "] Received message: " + input); + + + // only emit if we have declared fields. + if (this.declaredFields != null) { + LOG.debug("[" + this.name + "] emitting: " + input); + if (this.autoAnchor) { + this.collector.emit(input, input.getValues()); + } else { + this.collector.emit(input.getValues()); + } + } + + if (this.autoAck) { + LOG.debug("[" + this.name + "] ACKing tuple: " + input); + this.collector.ack(input); + } + + } + + @Override + public void cleanup() { + + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + if (this.declaredFields != null) { + declarer.declare(this.declaredFields); + } + } + + public boolean isAutoAck() { + return this.autoAck; + } + + public void setAutoAck(boolean autoAck) { + this.autoAck = autoAck; + } + +} diff --git a/examples/storm-jms-examples/src/main/java/org/apache/storm/jms/example/JsonTupleProducer.java b/examples/storm-jms-examples/src/main/java/org/apache/storm/jms/example/JsonTupleProducer.java new file mode 100644 index 00000000000..91c74e9c023 --- /dev/null +++ b/examples/storm-jms-examples/src/main/java/org/apache/storm/jms/example/JsonTupleProducer.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.storm.jms.example; + +import jakarta.jms.JMSException; +import jakarta.jms.Message; +import jakarta.jms.TextMessage; +import org.apache.storm.jms.JmsTupleProducer; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Values; + +/** + * A simple JmsTupleProducer that expects to receive + * JMS TextMessage objects with a body in JSON format. + *

+ * <p>Outputs a tuple with field name "json" and a string value
+ * containing the raw json.</p>
+ *

+ * NOTE: Currently this implementation assumes the text is valid
+ * JSON and does not attempt to parse or validate it.
+ *
+ * @author tgoetz
+ *
+ */
+@SuppressWarnings("serial")
+public class JsonTupleProducer implements JmsTupleProducer {
+
+    @Override
+    public Values toTuple(Message msg) throws JMSException {
+        if (msg instanceof TextMessage) {
+            String json = ((TextMessage) msg).getText();
+            return new Values(json);
+        } else {
+            return null;
+        }
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("json"));
+    }
+
+}
diff --git a/examples/storm-jms-examples/src/main/java/org/apache/storm/jms/example/SpringJmsProvider.java b/examples/storm-jms-examples/src/main/java/org/apache/storm/jms/example/SpringJmsProvider.java
new file mode 100644
index 00000000000..334a98e3fd7
--- /dev/null
+++ b/examples/storm-jms-examples/src/main/java/org/apache/storm/jms/example/SpringJmsProvider.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.jms.example;
+
+import jakarta.jms.ConnectionFactory;
+import jakarta.jms.Destination;
+import org.apache.storm.jms.JmsProvider;
+import org.springframework.context.ApplicationContext;
+import org.springframework.context.support.ClassPathXmlApplicationContext;
+
+/**
+ * A JmsProvider that uses the spring framework
+ * to obtain a JMS ConnectionFactory and
+ * Destination objects.
+ *

<p>The constructor takes three arguments:</p>
+ * <ol>
+ * <li>A string pointing to the spring application context file containing the JMS configuration
+ * (must be on the classpath)</li>
+ * <li>The name of the connection factory bean</li>
+ * <li>The name of the destination bean</li>
+ * </ol>

+ */ +@SuppressWarnings("serial") +public class SpringJmsProvider implements JmsProvider { + private ConnectionFactory connectionFactory; + private Destination destination; + + /** + * Constructs a SpringJmsProvider object given the name of a + * classpath resource (the spring application context file), and the bean + * names of a JMS connection factory and destination. + * + * @param appContextClasspathResource - the spring configuration file (classpath resource) + * @param connectionFactoryBean - the JMS connection factory bean name + * @param destinationBean - the JMS destination bean name + */ + public SpringJmsProvider(String appContextClasspathResource, String connectionFactoryBean, String destinationBean) { + ApplicationContext context = new ClassPathXmlApplicationContext(appContextClasspathResource); + this.connectionFactory = (ConnectionFactory) context.getBean(connectionFactoryBean); + this.destination = (Destination) context.getBean(destinationBean); + } + + @Override + public ConnectionFactory connectionFactory() throws Exception { + return this.connectionFactory; + } + + @Override + public Destination destination() throws Exception { + return this.destination; + } + +} diff --git a/examples/storm-jms-examples/src/main/resources/jms-activemq.xml b/examples/storm-jms-examples/src/main/resources/jms-activemq.xml new file mode 100644 index 00000000000..1a845b81303 --- /dev/null +++ b/examples/storm-jms-examples/src/main/resources/jms-activemq.xml @@ -0,0 +1,53 @@ + + + + + + + + + + + + + + + + + + + + + + + diff --git a/examples/storm-jms-examples/src/main/resources/log4j.properties b/examples/storm-jms-examples/src/main/resources/log4j.properties new file mode 100644 index 00000000000..079b195e0e3 --- /dev/null +++ b/examples/storm-jms-examples/src/main/resources/log4j.properties @@ -0,0 +1,29 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +log4j.rootLogger=INFO, stdout + +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + +log4j.appender.stdout.layout.ConversionPattern=%5p (%C:%L) - %m%n + + +log4j.logger.backtype.storm.contrib=DEBUG +log4j.logger.clojure.contrib=WARN +log4j.logger.org.springframework=WARN +log4j.logger.org.apache.zookeeper=WARN + diff --git a/examples/storm-kafka-client-examples/README.markdown b/examples/storm-kafka-client-examples/README.markdown new file mode 100644 index 00000000000..d4c28a13e3b --- /dev/null +++ b/examples/storm-kafka-client-examples/README.markdown @@ -0,0 +1,10 @@ +## Usage +This module contains example topologies demonstrating storm-kafka-client spout and Trident usage. Please ensure you have a Kafka instance running at localhost:9092 before you deploy the topologies. 
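+
+The spout configuration used by the examples boils down to something like the following sketch (condensed from `KafkaSpoutTopologyMainNamedTopics` in this module; the topic names and group id are the ones the example uses):
+
+```java
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.storm.kafka.spout.FirstPollOffsetStrategy;
+import org.apache.storm.kafka.spout.KafkaSpoutConfig;
+
+// Subscribe to the three example topics, reading each from the earliest
+// available offset the first time this consumer group sees them.
+KafkaSpoutConfig<String, String> spoutConfig =
+        KafkaSpoutConfig.builder("localhost:9092",
+                "kafka-spout-test", "kafka-spout-test-1", "kafka-spout-test-2")
+            .setProp(ConsumerConfig.GROUP_ID_CONFIG, "kafkaSpoutTestGroup")
+            .setFirstPollOffsetStrategy(FirstPollOffsetStrategy.EARLIEST)
+            .build();
+```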
+
+The module is built by running `mvn clean package -Dstorm.kafka.client.version=<kafka_version>`, where the property should match the Kafka version you want to use. For example, for Kafka 0.11.0.0 the `<kafka_version>` would be `0.11.0.0`. This will generate the `target/storm-kafka-client-examples-VERSION.jar` file. The jar contains all dependencies and can be submitted to Storm via the Storm CLI, e.g.
+```
+storm jar storm-kafka-client-examples-2.0.0-SNAPSHOT.jar org.apache.storm.kafka.spout.KafkaSpoutTopologyMainNamedTopics
+```
+will submit the topologies set up by KafkaSpoutTopologyMainNamedTopics to Storm.
+
+Note that this example produces a jar containing all dependencies for ease of use. When you deploy your own topologies in a production environment you may want to reduce the jar size by extracting some dependencies (e.g. `org.apache.kafka:kafka-clients`) from the jar. You can do this by setting the dependencies you don't want to include in the jar to `provided` scope, and then using the `--artifacts` flag for the `storm jar` command to fetch the dependencies when submitting the topology. See the [CLI documentation](http://storm.apache.org/releases/2.0.0-SNAPSHOT/Command-line-client.html) for syntax.
\ No newline at end of file
diff --git a/examples/storm-kafka-client-examples/pom.xml b/examples/storm-kafka-client-examples/pom.xml
new file mode 100644
index 00000000000..d7003fcb71d
--- /dev/null
+++ b/examples/storm-kafka-client-examples/pom.xml
@@ -0,0 +1,116 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <parent>
+        <artifactId>storm</artifactId>
+        <groupId>org.apache.storm</groupId>
+        <version>2.8.4-SNAPSHOT</version>
+        <relativePath>../../pom.xml</relativePath>
+    </parent>
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>storm-kafka-client-examples</artifactId>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-client</artifactId>
+            <version>${project.version}</version>
+            <scope>${provided.scope}</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-kafka-client</artifactId>
+            <version>${project.version}</version>
+            <scope>compile</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-clients</artifactId>
+            <scope>compile</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-server</artifactId>
+            <version>${project.version}</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <configuration>
+                    <createDependencyReducedPom>true</createDependencyReducedPom>
+                    <filters>
+                        <filter>
+                            <artifact>*:*</artifact>
+                            <excludes>
+                                <exclude>META-INF/*.SF</exclude>
+                                <exclude>META-INF/*.sf</exclude>
+                                <exclude>META-INF/*.DSA</exclude>
+                                <exclude>META-INF/*.dsa</exclude>
+                                <exclude>META-INF/*.RSA</exclude>
+                                <exclude>META-INF/*.rsa</exclude>
+                                <exclude>META-INF/*.EC</exclude>
+                                <exclude>META-INF/*.ec</exclude>
+                                <exclude>META-INF/MSFTSIG.SF</exclude>
+                                <exclude>META-INF/MSFTSIG.RSA</exclude>
+                            </excludes>
+                        </filter>
+                    </filters>
+                </configuration>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                            <transformers>
+                                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                                    <mainClass>org.apache.storm.kafka.trident.TridentKafkaClientWordCountNamedTopics</mainClass>
+                                </transformer>
+                            </transformers>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-checkstyle-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-pmd-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/bolt/KafkaProducerTopology.java b/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/bolt/KafkaProducerTopology.java
new file mode 100644
index 00000000000..f57d98d4d03
--- /dev/null
+++ b/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/bolt/KafkaProducerTopology.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.kafka.bolt;
+
+import java.util.Properties;
+import java.util.UUID;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.kafka.bolt.mapper.FieldNameBasedTupleToKafkaMapper;
+import org.apache.storm.kafka.bolt.selector.DefaultTopicSelector;
+import org.apache.storm.lambda.LambdaSpout;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.utils.Utils;
+
+public class KafkaProducerTopology {
+
+    /**
+     * Create a new topology that writes random UUIDs to Kafka.
+     *
+     * @param brokerUrl Kafka broker URL
+     * @param topicName Topic to which the messages are published
+     * @return A Storm topology that produces random UUIDs using a {@link LambdaSpout} and uses a {@link KafkaBolt} to publish the UUIDs to
+     *     the specified kafka topic
+     */
+    public static StormTopology newTopology(String brokerUrl, String topicName) {
+        final TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout("spout", () -> {
+            Utils.sleep(1000); // Throttle this spout a bit to avoid maxing out CPU
+            return UUID.randomUUID().toString();
+        });
+
+        /* The output field of the spout ("lambda") is provided as the boltMessageField
+           so that this gets written out as the message in the kafka topic.
+           The tuples have no key field, so the messages are written to Kafka without a key. */
+        final KafkaBolt<String, String> bolt = new KafkaBolt<String, String>()
+            .withProducerProperties(newProps(brokerUrl, topicName))
+            .withTopicSelector(new DefaultTopicSelector(topicName))
+            .withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper<>("key", "lambda"));
+
+        builder.setBolt("forwardToKafka", bolt, 1).shuffleGrouping("spout");
+
+        return builder.createTopology();
+    }
+
+    /**
+     * Create the Kafka producer properties.
+     * @return the producer properties for the {@link KafkaBolt} that publishes random UUIDs to Kafka.
+     */
+    private static Properties newProps(final String brokerUrl, final String topicName) {
+        return new Properties() {
+            {
+                put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerUrl);
+                put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
+                put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
+                put(ProducerConfig.CLIENT_ID_CONFIG, topicName);
+            }
+        };
+    }
+}
diff --git a/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/spout/KafkaSpoutTestBolt.java b/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/spout/KafkaSpoutTestBolt.java
new file mode 100644
index 00000000000..2bc348d7b3c
--- /dev/null
+++ b/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/spout/KafkaSpoutTestBolt.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.kafka.spout; + +import java.util.Map; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichBolt; +import org.apache.storm.tuple.Tuple; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class KafkaSpoutTestBolt extends BaseRichBolt { + protected static final Logger LOG = LoggerFactory.getLogger(KafkaSpoutTestBolt.class); + private OutputCollector collector; + + @Override + public void prepare(Map topoConf, TopologyContext context, OutputCollector collector) { + this.collector = collector; + } + + @Override + public void execute(Tuple input) { + LOG.debug("input = [" + input + "]"); + collector.ack(input); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + + } +} diff --git a/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/spout/KafkaSpoutTopologyMainNamedTopics.java b/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/spout/KafkaSpoutTopologyMainNamedTopics.java new file mode 100644 index 00000000000..beb794871be --- /dev/null +++ b/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/spout/KafkaSpoutTopologyMainNamedTopics.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.kafka.spout; + +import static org.apache.storm.kafka.spout.FirstPollOffsetStrategy.EARLIEST; + +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.generated.StormTopology; +import org.apache.storm.kafka.bolt.KafkaProducerTopology; +import org.apache.storm.kafka.spout.KafkaSpoutRetryExponentialBackoff.TimeInterval; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Values; + +/** + * This example sets up 3 topologies to put data in Kafka via the KafkaBolt, + * and shows how to set up a topology that reads from some Kafka topics using the KafkaSpout. 
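+ * One logging bolt subscribes to both of the spout's output streams; a second bolt subscribes
+ * only to the stream carrying records from the third topic.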
+ */
+public class KafkaSpoutTopologyMainNamedTopics {
+
+    private static final String TOPIC_2_STREAM = "test_2_stream";
+    private static final String TOPIC_0_1_STREAM = "test_0_1_stream";
+    private static final String KAFKA_LOCAL_BROKER = "localhost:9092";
+    public static final String TOPIC_0 = "kafka-spout-test";
+    public static final String TOPIC_1 = "kafka-spout-test-1";
+    public static final String TOPIC_2 = "kafka-spout-test-2";
+
+    public static void main(String[] args) throws Exception {
+        new KafkaSpoutTopologyMainNamedTopics().runMain(args);
+    }
+
+    protected void runMain(String[] args) throws Exception {
+        final String brokerUrl = args.length > 0 ? args[0] : KAFKA_LOCAL_BROKER;
+        System.out.println("Running with broker url: " + brokerUrl);
+
+        Config tpConf = getConfig();
+
+        // Producers. This is just to get some data into Kafka; normally you would get this data from elsewhere.
+        StormSubmitter.submitTopology(TOPIC_0 + "-producer", tpConf, KafkaProducerTopology.newTopology(brokerUrl, TOPIC_0));
+        StormSubmitter.submitTopology(TOPIC_1 + "-producer", tpConf, KafkaProducerTopology.newTopology(brokerUrl, TOPIC_1));
+        StormSubmitter.submitTopology(TOPIC_2 + "-producer", tpConf, KafkaProducerTopology.newTopology(brokerUrl, TOPIC_2));
+
+        // Consumer. Sets up a topology that reads the given Kafka spouts and logs the received messages.
+        StormSubmitter.submitTopology("storm-kafka-client-spout-test", tpConf, getTopologyKafkaSpout(getKafkaSpoutConfig(brokerUrl)));
+    }
+
+    protected Config getConfig() {
+        Config config = new Config();
+        config.setDebug(true);
+        return config;
+    }
+
+    protected StormTopology getTopologyKafkaSpout(KafkaSpoutConfig<String, String> spoutConfig) {
+        final TopologyBuilder tp = new TopologyBuilder();
+        tp.setSpout("kafka_spout", new KafkaSpout<>(spoutConfig), 1);
+        tp.setBolt("kafka_bolt", new KafkaSpoutTestBolt())
+            .shuffleGrouping("kafka_spout", TOPIC_0_1_STREAM)
+            .shuffleGrouping("kafka_spout", TOPIC_2_STREAM);
+        tp.setBolt("kafka_bolt_1", new KafkaSpoutTestBolt()).shuffleGrouping("kafka_spout", TOPIC_2_STREAM);
+        return tp.createTopology();
+    }
+
+    protected KafkaSpoutConfig<String, String> getKafkaSpoutConfig(String bootstrapServers) {
+        ByTopicRecordTranslator<String, String> trans = new ByTopicRecordTranslator<>(
+            (r) -> new Values(r.topic(), r.partition(), r.offset(), r.key(), r.value()),
+            new Fields("topic", "partition", "offset", "key", "value"), TOPIC_0_1_STREAM);
+        trans.forTopic(TOPIC_2,
+            (r) -> new Values(r.topic(), r.partition(), r.offset(), r.key(), r.value()),
+            new Fields("topic", "partition", "offset", "key", "value"), TOPIC_2_STREAM);
+        return KafkaSpoutConfig.builder(bootstrapServers, new String[]{TOPIC_0, TOPIC_1, TOPIC_2})
+            .setProp(ConsumerConfig.GROUP_ID_CONFIG, "kafkaSpoutTestGroup")
+            .setRetry(getRetryService())
+            .setRecordTranslator(trans)
+            .setOffsetCommitPeriodMs(10_000)
+            .setFirstPollOffsetStrategy(EARLIEST)
+            .setMaxUncommittedOffsets(250)
+            .build();
+    }
+
+    protected KafkaSpoutRetryService getRetryService() {
+        return new KafkaSpoutRetryExponentialBackoff(TimeInterval.microSeconds(500),
+            TimeInterval.milliSeconds(2), Integer.MAX_VALUE, TimeInterval.seconds(10));
+    }
+}
diff --git a/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/spout/KafkaSpoutTopologyMainWildcardTopics.java b/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/spout/KafkaSpoutTopologyMainWildcardTopics.java
new file mode 100644
index 00000000000..a5740782c94
--- /dev/null
+++ b/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/spout/KafkaSpoutTopologyMainWildcardTopics.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.kafka.spout;
+
+import static org.apache.storm.kafka.spout.FirstPollOffsetStrategy.EARLIEST;
+
+import java.util.regex.Pattern;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+
+/**
+ * This example is similar to {@link KafkaSpoutTopologyMainNamedTopics}, but demonstrates subscribing to Kafka topics with a regex.
+ */
+public class KafkaSpoutTopologyMainWildcardTopics extends KafkaSpoutTopologyMainNamedTopics {
+
+    private static final String STREAM = "test_wildcard_stream";
+    private static final Pattern TOPIC_WILDCARD_PATTERN = Pattern.compile("kafka-spout-test-[1|2]");
+
+    public static void main(String[] args) throws Exception {
+        new KafkaSpoutTopologyMainWildcardTopics().runMain(args);
+    }
+
+    @Override
+    protected StormTopology getTopologyKafkaSpout(KafkaSpoutConfig<String, String> spoutConfig) {
+        final TopologyBuilder tp = new TopologyBuilder();
+        tp.setSpout("kafka_spout", new KafkaSpout<>(spoutConfig), 1);
+        tp.setBolt("kafka_bolt", new KafkaSpoutTestBolt()).shuffleGrouping("kafka_spout", STREAM);
+        return tp.createTopology();
+    }
+
+    @Override
+    protected KafkaSpoutConfig<String, String> getKafkaSpoutConfig(String bootstrapServers) {
+        return KafkaSpoutConfig.builder(bootstrapServers, TOPIC_WILDCARD_PATTERN)
+            .setProp(ConsumerConfig.GROUP_ID_CONFIG, "kafkaSpoutTestGroup")
+            .setRetry(getRetryService())
+            .setRecordTranslator((r) -> new Values(r.topic(), r.partition(), r.offset(), r.key(), r.value()),
+                new Fields("topic", "partition", "offset", "key", "value"), STREAM)
+            .setOffsetCommitPeriodMs(10_000)
+            .setFirstPollOffsetStrategy(EARLIEST)
+            .setMaxUncommittedOffsets(250)
+            .build();
+    }
+}
diff --git a/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/trident/TridentKafkaClientTopologyNamedTopics.java b/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/trident/TridentKafkaClientTopologyNamedTopics.java
new file mode 100644
index 00000000000..3c92a22d4fe
--- /dev/null
+++ b/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/trident/TridentKafkaClientTopologyNamedTopics.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.kafka.trident;
+
+import static org.apache.storm.kafka.spout.FirstPollOffsetStrategy.EARLIEST;
+
+import java.io.Serializable;
+import java.util.List;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.AlreadyAliveException;
+import org.apache.storm.generated.AuthorizationException;
+import org.apache.storm.generated.InvalidTopologyException;
+import org.apache.storm.kafka.bolt.KafkaProducerTopology;
+import org.apache.storm.kafka.spout.Func;
+import org.apache.storm.kafka.spout.trident.KafkaTridentSpoutConfig;
+import org.apache.storm.kafka.spout.trident.KafkaTridentSpoutOpaque;
+import org.apache.storm.kafka.spout.trident.KafkaTridentSpoutTransactional;
+import org.apache.storm.trident.spout.ITridentDataSource;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+
+/**
+ * This example sets up a few topologies to put random strings in Kafka topics via the KafkaBolt,
+ * and shows how to set up a Trident topology that reads from some Kafka topics using the KafkaSpout.
+ */
+public class TridentKafkaClientTopologyNamedTopics {
+
+    private static final String TOPIC_1 = "test-trident";
+    private static final String TOPIC_2 = "test-trident-1";
+    private static final String KAFKA_LOCAL_BROKER = "localhost:9092";
+
+    private KafkaTridentSpoutOpaque<String, String> newKafkaTridentSpoutOpaque(KafkaTridentSpoutConfig<String, String> spoutConfig) {
+        return new KafkaTridentSpoutOpaque<>(spoutConfig);
+    }
+
+    private KafkaTridentSpoutTransactional<String, String> newKafkaTridentSpoutTransactional(
+            KafkaTridentSpoutConfig<String, String> spoutConfig) {
+        return new KafkaTridentSpoutTransactional<>(spoutConfig);
+    }
+
+    private static final Func<ConsumerRecord<String, String>, List<Object>> JUST_VALUE_FUNC = new JustValueFunc();
+
+    /**
+     * Needs to be serializable.
+     */
+    private static class JustValueFunc implements Func<ConsumerRecord<String, String>, List<Object>>, Serializable {
+
+        @Override
+        public List<Object> apply(ConsumerRecord<String, String> record) {
+            return new Values(record.value());
+        }
+    }
+
+    protected KafkaTridentSpoutConfig<String, String> newKafkaSpoutConfig(String bootstrapServers) {
+        return KafkaTridentSpoutConfig.builder(bootstrapServers, TOPIC_1, TOPIC_2)
+            .setProp(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 200)
+            .setRecordTranslator(JUST_VALUE_FUNC, new Fields("str"))
+            .setFirstPollOffsetStrategy(EARLIEST)
+            .build();
+    }
+
+    public static void main(String[] args) throws Exception {
+        new TridentKafkaClientTopologyNamedTopics().run(args);
+    }
+
+    protected void run(String[] args) throws AlreadyAliveException, InvalidTopologyException,
+            AuthorizationException, InterruptedException {
+        final String brokerUrl = args.length > 0 ? args[0] : KAFKA_LOCAL_BROKER;
+        final boolean isOpaque = args.length > 1 ? Boolean.parseBoolean(args[1]) : true;
+        System.out.println("Running with broker url " + brokerUrl + " and isOpaque=" + isOpaque);
+
+        Config tpConf = new Config();
+        tpConf.setDebug(true);
+        tpConf.setMaxSpoutPending(5);
+
+        // Producers
+        StormSubmitter.submitTopology(TOPIC_1 + "-producer", tpConf, KafkaProducerTopology.newTopology(brokerUrl, TOPIC_1));
+        StormSubmitter.submitTopology(TOPIC_2 + "-producer", tpConf, KafkaProducerTopology.newTopology(brokerUrl, TOPIC_2));
+        // Consumer
+        KafkaTridentSpoutConfig<String, String> spoutConfig = newKafkaSpoutConfig(brokerUrl);
+        ITridentDataSource spout = isOpaque ? newKafkaTridentSpoutOpaque(spoutConfig) : newKafkaTridentSpoutTransactional(spoutConfig);
+        StormSubmitter.submitTopology("topics-consumer", tpConf,
+            TridentKafkaConsumerTopology.newTopology(spout));
+    }
+}
diff --git a/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/trident/TridentKafkaClientTopologyWildcardTopics.java b/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/trident/TridentKafkaClientTopologyWildcardTopics.java
new file mode 100644
index 00000000000..f770c75345d
--- /dev/null
+++ b/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/trident/TridentKafkaClientTopologyWildcardTopics.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.kafka.trident;
+
+import static org.apache.storm.kafka.spout.FirstPollOffsetStrategy.EARLIEST;
+
+import java.util.regex.Pattern;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.storm.kafka.spout.trident.KafkaTridentSpoutConfig;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+
+/**
+ * This example is similar to {@link TridentKafkaClientTopologyNamedTopics}, but demonstrates subscribing to Kafka topics with a regex.
+ */
+public class TridentKafkaClientTopologyWildcardTopics extends TridentKafkaClientTopologyNamedTopics {
+    private static final Pattern TOPIC_WILDCARD_PATTERN = Pattern.compile("test-trident(-1)?");
+
+    @Override
+    protected KafkaTridentSpoutConfig<String, String> newKafkaSpoutConfig(String bootstrapServers) {
+        return KafkaTridentSpoutConfig.builder(bootstrapServers, TOPIC_WILDCARD_PATTERN)
+            .setProp(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 200)
+            .setRecordTranslator((r) -> new Values(r.value()), new Fields("str"))
+            .setFirstPollOffsetStrategy(EARLIEST)
+            .build();
+    }
+
+    public static void main(String[] args) throws Exception {
+        new TridentKafkaClientTopologyWildcardTopics().run(args);
+    }
+}
diff --git a/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/trident/TridentKafkaConsumerTopology.java b/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/trident/TridentKafkaConsumerTopology.java
new file mode 100644
index 00000000000..378b7ae56a8
--- /dev/null
+++ b/examples/storm-kafka-client-examples/src/main/java/org/apache/storm/kafka/trident/TridentKafkaConsumerTopology.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.kafka.trident;
+
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.operation.builtin.Debug;
+import org.apache.storm.trident.spout.ITridentDataSource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TridentKafkaConsumerTopology {
+
+    protected static final Logger LOG = LoggerFactory.getLogger(TridentKafkaConsumerTopology.class);
+
+    /**
+     * Creates a new topology that prints inputs to stdout.
+     * @param tridentSpout The spout to use
+     */
+    public static StormTopology newTopology(ITridentDataSource tridentSpout) {
+        final TridentTopology tridentTopology = new TridentTopology();
+        final Stream spoutStream = tridentTopology.newStream("spout", tridentSpout).parallelismHint(2);
+        spoutStream.each(spoutStream.getOutputFields(), new Debug(false));
+        return tridentTopology.build();
+    }
+}
diff --git a/examples/storm-kafka-client-examples/src/test/java/org/apache/storm/kafka/spout/KafkaSpoutTopologyMainNamedTopicsLocal.java b/examples/storm-kafka-client-examples/src/test/java/org/apache/storm/kafka/spout/KafkaSpoutTopologyMainNamedTopicsLocal.java
new file mode 100644
index 00000000000..0c82da726b2
--- /dev/null
+++ b/examples/storm-kafka-client-examples/src/test/java/org/apache/storm/kafka/spout/KafkaSpoutTopologyMainNamedTopicsLocal.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2017 The Apache Software Foundation.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.kafka.spout; + +import static org.apache.storm.kafka.spout.KafkaSpoutTopologyMainNamedTopics.TOPIC_0; +import static org.apache.storm.kafka.spout.KafkaSpoutTopologyMainNamedTopics.TOPIC_1; +import static org.apache.storm.kafka.spout.KafkaSpoutTopologyMainNamedTopics.TOPIC_2; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import org.apache.storm.Config; +import org.apache.storm.LocalCluster; +import org.apache.storm.kafka.bolt.KafkaProducerTopology; + +public class KafkaSpoutTopologyMainNamedTopicsLocal { + + public static void main(String[] args) throws Exception { + new KafkaSpoutTopologyMainNamedTopicsLocal().runExample(); + } + + protected void runExample() throws Exception { + String brokerUrl = "localhost:9092"; + KafkaSpoutTopologyMainNamedTopics example = getTopology(); + Config tpConf = example.getConfig(); + + LocalCluster localCluster = new LocalCluster(); + // Producers. This is just to get some data in Kafka, normally you would be getting this data from elsewhere + localCluster.submitTopology(TOPIC_0 + "-producer", tpConf, KafkaProducerTopology.newTopology(brokerUrl, TOPIC_0)); + localCluster.submitTopology(TOPIC_1 + "-producer", tpConf, KafkaProducerTopology.newTopology(brokerUrl, TOPIC_1)); + localCluster.submitTopology(TOPIC_2 + "-producer", tpConf, KafkaProducerTopology.newTopology(brokerUrl, TOPIC_2)); + + //Consumer. Sets up a topology that reads the given Kafka spouts and logs the received messages + localCluster.submitTopology("storm-kafka-client-spout-test", tpConf, example.getTopologyKafkaSpout(example.getKafkaSpoutConfig(brokerUrl))); + + stopWaitingForInput(); + } + + protected KafkaSpoutTopologyMainNamedTopics getTopology() { + return new KafkaSpoutTopologyMainNamedTopics(); + } + + protected void stopWaitingForInput() { + try { + System.out.println("PRESS ENTER TO STOP"); + new BufferedReader(new InputStreamReader(System.in)).readLine(); + System.exit(0); + } catch (IOException e) { + e.printStackTrace(); + } + } +} diff --git a/examples/storm-kafka-client-examples/src/test/java/org/apache/storm/kafka/spout/KafkaSpoutTopologyMainWildcardTopicsLocal.java b/examples/storm-kafka-client-examples/src/test/java/org/apache/storm/kafka/spout/KafkaSpoutTopologyMainWildcardTopicsLocal.java new file mode 100644 index 00000000000..cf70bb34743 --- /dev/null +++ b/examples/storm-kafka-client-examples/src/test/java/org/apache/storm/kafka/spout/KafkaSpoutTopologyMainWildcardTopicsLocal.java @@ -0,0 +1,30 @@ +/* + * Copyright 2017 The Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.kafka.spout;
+
+public class KafkaSpoutTopologyMainWildcardTopicsLocal extends KafkaSpoutTopologyMainNamedTopicsLocal {
+
+    public static void main(String[] args) throws Exception {
+        new KafkaSpoutTopologyMainWildcardTopicsLocal().runExample();
+    }
+
+    @Override
+    protected KafkaSpoutTopologyMainNamedTopics getTopology() {
+        return new KafkaSpoutTopologyMainWildcardTopics();
+    }
+
+}
diff --git a/examples/storm-loadgen/README.md b/examples/storm-loadgen/README.md
new file mode 100644
index 00000000000..88ad5f97546
--- /dev/null
+++ b/examples/storm-loadgen/README.md
@@ -0,0 +1,204 @@
+# Storm Load Generation Tools
+
+A set of tools to place an artificial load on a storm cluster so it can be compared against a different storm cluster. This is particularly helpful when making changes to the data path in storm, to see what impact, if any, the changes have. It is also useful for end users that want to compare different hardware setups to see what the trade-offs are, although actually running your real topologies is going to be more accurate.
+
+## Methodology
+The idea behind all of these tools is to measure the trade-offs between latency, throughput, and cost when processing data using Apache Storm.
+
+When processing data you typically will know a few things. First you will know about how much data you are going to be processing. This will typically be a range of values that change throughout the day. You will also have an idea of how quickly you need the data to be processed. Often this is measured in terms of the latency it takes to process data at some percentile or set of percentiles. This is because in most use cases the value of the data declines over time, and being able to react to the data quickly is more valuable. You probably also have a budget for how much you are willing to spend to process this data. There are always trade-offs in how quickly you can process some data and how efficiently you can process that data, both in terms of resource usage (cost) and latency. These tools are designed to help you explore that space.
+
+A note on how latency is measured. Storm typically measures latency from when a message is emitted by a spout until the point it is fully acked or failed (in many versions of storm it actually does this in the acker instead of the spout, so that it measures how long the actual processing takes while removing as much of the acker overhead as possible). For these tools we do it differently. We simulate a throughput and measure the start time of each tuple from when it would have been emitted if the topology could keep up with the load. In the normal case this should not be an issue, but if the topology cannot keep up with the throughput you will see the latency grow very high compared to the latency reported by storm.
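+
+To make that concrete, here is a minimal sketch of the idea (not the tools' actual code): latency is taken against the time a tuple *should* have been emitted for the target rate, so any queuing delay in a topology that has fallen behind counts against the measurement.
+
+```
+/** Hypothetical sketch: latency measured from a tuple's scheduled emit time. */
+public class SimulatedLatency {
+    /** Latency of the n-th tuple at a fixed target rate, measured from its scheduled emit time. */
+    static long latencyNanos(long startNs, long n, double ratePerSec, long ackTimeNs) {
+        long scheduledEmitNs = startNs + (long) (n * (1_000_000_000.0 / ratePerSec));
+        return ackTimeNs - scheduledEmitNs; // keeps growing if the topology cannot keep up
+    }
+}
+```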
+
+## Tools
+### CaptureLoad
+
+`CaptureLoad` will look at the topologies on a running cluster and store the structure of, and metrics about, each one in a format (described below) that can be used later to reproduce a similar load on the cluster.
+
+#### Usage
+```
+storm jar storm-loadgen.jar org.apache.storm.loadgen.CaptureLoad [options] [topologyName]*
+```
+|Option| Description|
+|-----|-----|
+|-a,--anonymize | Strip out any possibly identifiable information|
+| -h,--help | Print a help message |
+| -o,--output-dir | Where to write (defaults to ./loadgen/)|
+
+#### Limitations
+This is still a work in progress. It does not currently capture CPU or memory usage of a topology. Resource requests (used by RAS when scheduling) within the topology are also not captured yet, nor is the user that actually ran the topology.
+
+### GenLoad
+
+`GenLoad` will take the files produced by `CaptureLoad` and replay them in a simulated way on a cluster. It also offers lots of ways to capture metrics about those simulated topologies, to be able to compare different software versions or different hardware setups. You can also make adjustments to the topology before submitting it, to change the size or throughput of the topology.
+
+#### Usage
+```
+storm jar storm-loadgen.jar org.apache.storm.loadgen.GenLoad [options] [capture_file]*
+```
+
+|Option| Description|
+|-----|-----|
+| --debug | Print debug information about the adjusted topology before submitting it. |
+|-h,--help | Print a help message |
+| --local-or-shuffle | Replace shuffle grouping with local or shuffle grouping. |
+| --parallel <MULTIPLIER(:TOPO:COMP)?> | How much to scale the topology up or down in parallelism. The new parallelism will round up to the next whole number. If a topology + component is supplied only that component will be scaled. If topo or component is blank or a `'*'`, all topologies or components that match the other part will be scaled. Only 1 scaling rule, the most specific, will be applied to a component. Providing a topology name is considered more specific than not providing one. (Defaults to 1.0, no scaling.) |
+| -r,--report-interval <INTERVAL_SECS> | How long between reported metrics. Will be rounded up to the next 10 sec boundary. Defaults to 30. |
+| --reporter <TYPE:FILE?OPTIONS> | Provide the config for a reporter to run. See below for more information about these |
+| -t,--test-time <MINS> | How long to run the tests for in mins (defaults to 5) |
+| --throughput <MULTIPLIER(:TOPO:COMP)?> | How much to scale the topology up or down in throughput. If a topology + component is supplied only that component will be scaled. If topo or component is blank or a `'*'`, all topologies or components that match will be scaled. Only 1 scaling rule, the most specific, will be applied to a component. Providing a topology name is considered more specific than not providing one. (Defaults to 1.0, no scaling.)|
+| -w,--report-window <INTERVAL_SECS> | How long of a rolling window should be in each report. Will be rounded up to the next report interval boundary. Defaults to 30.|
+| --imbalance <MS(:COUNT)?:TOPO:COMP> | The number of ms that the first COUNT of TOPO:COMP will wait before processing. This creates an imbalance that helps test load aware groupings. By default there is no imbalance unless specified by the capture file. |
+
+### ThroughputVsLatency
+A word count topology with metrics reporting like the `GenLoad` command.
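+
+For example, a hypothetical run that pushes 5000 sentences per second through 4 splitter bolts for 10 minutes, appending a CSV report to a file (all options are described below), might look like:
+```
+storm jar storm-loadgen.jar org.apache.storm.loadgen.ThroughputVsLatency --rate 5000 --splitters 4 -t 10 --reporter csv:./wc_run.csv?append
+```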
+
+#### Usage
+```
+storm jar storm-loadgen.jar org.apache.storm.loadgen.ThroughputVsLatency [options]
+```
+
+|Option| Description|
+|-----|-----|
+|--counters <NUM>| Number of counter bolts to use (defaults to 1)|
+| -h,--help | Print a help message |
+| --name | Name of the topology to run (defaults to wc-test) |
+| -r,--report-interval <INTERVAL_SECS>| How long between reported metrics. Will be rounded up to the next 10 sec boundary. Defaults to 30. |
+| --rate <SENTENCES/SEC>| How many sentences per second to run. (defaults to 500) |
+| --reporter <TYPE:FILE?OPTIONS> | Provide the config for a reporter to run. See below for more information about these |
+|--splitters <NUM> | Number of splitter bolts to use (defaults to 1) |
+| --spouts <NUM>| Number of spouts to use (defaults to 1) |
+| -t,--test-time <MINS>| How long to run the tests for in mins (defaults to 5) |
+| -w,--report-window <INTERVAL_SECS>| How long of a rolling window should be in each report. Will be rounded up to the next report interval boundary.|
+| --splitter-imbalance <MS(:COUNT)?> | The number of ms that the first COUNT splitters will wait before processing. This creates an imbalance that helps test load aware groupings (defaults to 0:1)|
+
+# Reporters
+Reporters provide a way to store various statistics about a running topology. There are currently a few supported reporters:
+
+ * `legacy` - report values like ThroughputVsLatency has done in the past
+ * `tsv` - tab separated values
+ * `csv` - comma separated values
+ * `fixed` - a human readable fixed width format
+
+A `fixed` reporter to stdout will be added if no other reporters are writing to stdout or stderr.
+
+All of these types can have their data written out to a file. To do this add a path after the type. For example `legacy:./legacy_data` or `tsv:my_run.tsv`. By default the file will be overwritten unless an option is given to append instead. Options are in a URL-like format, with a `?` separating the type:path from the options, and all of the options separated by a `&`. To append to the file you can do something like `csv:./my_run.csv?append` or `csv:./my_run.csv?append=true`.
+
+Not all options are supported by all reporters.
+
+|Reporter Option| Description | Supported Reporters|
+|---------------|-------------|--------------------|
+|time | Set the time unit that you want latency and CPU reported in. This can be from nanoseconds up to seconds. Most names for the units are supported| legacy, csv, tsv, fixed|
+|columns | A comma separated list of columns to output (see below for the metrics supported). A `*` is replaced by all metrics. Defaults to "start_time", "end_time", "rate", "mean", "99%ile", "99.9%ile", "cores", "mem", "failed", "ids", "congested" | csv, tsv, fixed |
+|extraColumns | Like columns but ones that should be added to the defaults instead of replacing them. A `*` is replaced by all metrics. | csv, tsv, fixed |
+|meta | An arbitrary string that will appear as a "meta" column at the end. This helps when appending to files to keep different runs separated | csv, tsv, fixed|
+|precision | The number of places after the decimal point to print out. The default for `fixed` is 3; for all others it is unlimited. | csv, tsv, fixed|
+|tee | A boolean saying whether, in addition to being written to a file, the output should also be written to stdout. | csv, tsv, fixed|
+|columnWidth | The width of each field | fixed|
+
+There are a lot of different metrics supported:
+
+|Metrics Name| Description| In |
+|------------|------------|----|
+|99%ile| 99th percentile completion latency. | all
+|99.9%ile| 99.9th percentile completion latency. | all
+|median| Median completion latency. | all
+|mean| Mean completion latency. | all
+|min| Minimum completion latency. | all
+|max| Maximum completion latency. | all
+|stddev| Standard deviation of completion latency. | all
+|user_cpu| User space CPU time.| all
+|sys_cpu| System space CPU time. | all
+|gc_cpu| Amount of CPU time spent in GC as reported by the JVM. | all
+|cores| The number of CPU cores used. `(user_cpu + sys_cpu) / time_window`| all
+|uptime| The amount of time the oldest topology has been up for. | all
+|acked| The number of tuples fully acked as reported by Storm's metrics. | all
+|acked_rate| The rate of tuples fully acked as reported by Storm's metrics. | all
+|completed| The number of tuples fully acked as reported by the latency histogram metrics. | all
+|rate| The rate of tuples fully acked as reported by the latency histogram metrics. | all
+|mem| The amount of memory used by the topology in MB, as reported by the JVM. | all
+|failed| The number of failed tuples as reported by Storm's metrics. | all
+|start_time| The starting time of the metrics window from when the first topology was launched. | all
+|end_time| The ending time of the metrics window from when the first topology was launched. | all
+|time_window| The length in seconds of the time window. | all
+|ids| The topology ids that are being tracked | all
+|congested| Components that appear to be congested | all
+|storm_version| The version of storm as reported by the client | all
+|java_version| The version of java as reported by the client | all
+|os_arch| The OS architecture as reported by the client | all
+|os_name| The name of the OS as reported by the client | all
+|os_version| The version of the OS as reported by the client | all
+|config_override| Any command line overrides to storm config values | all
+|hosts| The number of hosts the monitored topologies are running on| all
+|executors| The number of running executors in the monitored topologies | all
+|workers| The number of workers the monitored topologies are running on | all
+|skipped\_max\_spout| The number of ms in total that the spout reported it skipped trying to emit because of `topology.max.spout.pending`. This is the sum for all spouts and can be used to decide if setting the value higher will likely improve throughput. `congested` reports individual spouts that appear to be slowed down by this to a large degree. | all
+|ui\_complete\_latency| This is a special metric, as it is the average completion latency as reported on the ui for `:all-time`. Because it comes from the UI it does not follow the normal windows. Within a window the maximum value reported is used. | all
+|target_rate| The target rate in sentences per second for the ThroughputVsLatency topology | ThroughputVsLatency
+|spout_parallel| The parallelism of the spout for the `ThroughputVsLatency` topology. | ThroughputVsLatency
+|split_parallel| The parallelism of the split bolt for the `ThroughputVsLatency` topology. | ThroughputVsLatency
+|count_parallel| The parallelism of the count bolt for the `ThroughputVsLatency` topology. | ThroughputVsLatency
+|parallel\_adjust| The adjustment to the parallelism in `GenLoad`. | GenLoad
+|topo_parallel| A list of topology/component specific adjustment rules to the parallelism in `GenLoad`. | GenLoad
+|throughput_adjust| The adjustment to the throughput in `GenLoad`. | GenLoad
+|topo_throughput| A list of topology/component specific adjustment rules to the throughput in `GenLoad`. | GenLoad
+|local\_or\_shuffle| true if shuffles were replaced with local or shuffle in GenLoad. | GenLoad
+|slow\_execs| A list of topology/component specific adjustment rules to the slowExecutorPattern in `GenLoad`. | GenLoad
+
+There are also some generic rules that you can use for some metrics. Any metric that starts with `"conf:"` will report the value of that config key. It does not include config overrides from the `GenLoad` file.
+
+In addition, any metric that ends with `"%ile"` will be the latency at that percentile.
+
+
+# Captured Load File Format
+The file format used with `CaptureLoad` and `GenLoad` is based on the flux file format, but with some extensions and omissions.
+
+At the top level, the supported keys are
+
+| Config | Description |
+|--------|-------------|
+| name | The name of the topology. If not given the base name of the file will be used. |
+| config | A map of String to Object configs to use when submitting the topology. |
+| spouts | A list of spouts for the topology. |
+| bolts | A list of bolts in the topology. |
+| streams | A list of streams connecting different components in the topology. |
+
+## Spouts and Bolts
+
+Spouts and bolts have the same format.
+
+| Config | Description |
+|--------|-------------|
+| id | The id of the bolt or spout. This should be unique within the topology |
+| parallelism | How many instances of this component should be a part of the topology |
+| streams | The streams that are produced by this bolt or spout |
+| cpuLoad | The number of cores this component needs for resource aware scheduling |
+| memoryLoad | The amount of memory in MB that this component needs for resource aware scheduling |
+| slowExecutorPattern.slownessMs | An optional number of ms to slow down the exec + process latency for some of this component (defaults to 0) |
+| slowExecutorPattern.count | The number of components to slow down (defaults to 1) |
+
+### Output Streams
+
+This is not a part of flux. It defines the output of a bolt or spout.
+
+| Config | Description |
+|--------|-------------|
+| streamId | The ID of the stream being output. The default is "default" |
+| rate | This is a map describing the rate at which messages are output on this stream. |
+
+The rate has at least a `mean` value. If you want the rate to vary a bit over time you can also include a standard deviation with `stddev` and a `min` and `max` value. The actual rates selected will follow a Gaussian distribution within those bounds.
+
+## (Input) Streams
+
+The streams that connect components together have the following form.
+
+| Config | Description |
+|--------|-------------|
+| from | the component id the stream is coming from |
+| to | the component id the stream is going to |
+| grouping | This is a map that defines the grouping used |
+| grouping.type | the type of grouping including `SHUFFLE`, `FIELDS`, `ALL`, `GLOBAL`, `LOCAL_OR_SHUFFLE`, `NONE`, or `PARTIAL_KEY`. Defaults to `SHUFFLE` |
+| grouping.streamId | the id of the stream (default is "default") |
+| execTime | a distribution of the amount of time in milliseconds that execution of this component takes (execute latency). |
+| processTime | a distribution of the amount of time in milliseconds that processing of a tuple takes (process latency). |
+
+The `execTime` and `processTime` values follow the same pattern as the `OutputStream` `rate`. A `mean` value is required, but `stddev`, `min`, and `max` may also be given.
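+
+Putting the pieces together, here is a small, hypothetical capture file (all names and numbers are made up) with one spout feeding one bolt:
+
+```
+name: example-topo
+config:
+  topology.workers: 2
+spouts:
+  - id: spout-0
+    parallelism: 2
+    streams:
+      - streamId: default
+        rate:
+          mean: 500.0
+          stddev: 50.0
+          min: 0.0
+          max: 1000.0
+bolts:
+  - id: bolt-0
+    parallelism: 4
+    cpuLoad: 50.0
+    memoryLoad: 128.0
+streams:
+  - from: spout-0
+    to: bolt-0
+    grouping:
+      type: SHUFFLE
+    execTime:
+      mean: 0.5
+    processTime:
+      mean: 0.5
+```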
diff --git a/examples/storm-loadgen/pom.xml b/examples/storm-loadgen/pom.xml
new file mode 100644
index 00000000000..2976aa59a89
--- /dev/null
+++ b/examples/storm-loadgen/pom.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <artifactId>storm</artifactId>
+        <groupId>org.apache.storm</groupId>
+        <version>2.8.4-SNAPSHOT</version>
+        <relativePath>../../pom.xml</relativePath>
+    </parent>
+    <artifactId>storm-loadgen</artifactId>
+    <packaging>jar</packaging>
+    <name>storm-loadgen</name>
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    </properties>
+    <dependencies>
+        <dependency>
+            <groupId>org.hdrhistogram</groupId>
+            <artifactId>HdrHistogram</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-client</artifactId>
+            <version>${project.version}</version>
+            <scope>${provided.scope}</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.eclipse.jetty</groupId>
+            <artifactId>jetty-server</artifactId>
+            <version>${jetty.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.eclipse.jetty.ee10</groupId>
+            <artifactId>jetty-ee10-servlet</artifactId>
+            <version>${jetty.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-metrics</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>commons-cli</groupId>
+            <artifactId>commons-cli</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>commons-io</groupId>
+            <artifactId>commons-io</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>net.minidev</groupId>
+            <artifactId>json-smart</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.yaml</groupId>
+            <artifactId>snakeyaml</artifactId>
+        </dependency>
+    </dependencies>
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <configuration>
+                    <createDependencyReducedPom>true</createDependencyReducedPom>
+                    <filters>
+                        <filter>
+                            <artifact>*:*</artifact>
+                            <excludes>
+                                <exclude>META-INF/*.SF</exclude>
+                                <exclude>META-INF/*.sf</exclude>
+                                <exclude>META-INF/*.DSA</exclude>
+                                <exclude>META-INF/*.dsa</exclude>
+                                <exclude>META-INF/*.RSA</exclude>
+                                <exclude>META-INF/*.rsa</exclude>
+                                <exclude>META-INF/*.EC</exclude>
+                                <exclude>META-INF/*.ec</exclude>
+                                <exclude>META-INF/MSFTSIG.SF</exclude>
+                                <exclude>META-INF/MSFTSIG.RSA</exclude>
+                            </excludes>
+                        </filter>
+                    </filters>
+                </configuration>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-checkstyle-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-pmd-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/CaptureLoad.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/CaptureLoad.java
new file mode 100644
index 00000000000..f970ef27ea9
--- /dev/null
+++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/CaptureLoad.java
@@ -0,0 +1,464 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.loadgen;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import net.minidev.json.JSONObject;
+import net.minidev.json.JSONValue;
+import net.minidev.json.parser.JSONParser;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.storm.Config;
+import org.apache.storm.generated.Bolt;
+import org.apache.storm.generated.BoltStats;
+import org.apache.storm.generated.ComponentCommon;
+import org.apache.storm.generated.ExecutorSummary;
+import org.apache.storm.generated.GlobalStreamId;
+import org.apache.storm.generated.Grouping;
+import org.apache.storm.generated.Nimbus;
+import org.apache.storm.generated.SpoutSpec;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.generated.StreamInfo;
+import org.apache.storm.generated.TopologyInfo;
+import org.apache.storm.generated.TopologyPageInfo;
+import org.apache.storm.generated.TopologySummary;
+import org.apache.storm.generated.WorkerSummary;
+import org.apache.storm.utils.NimbusClient;
+import org.apache.storm.utils.ObjectReader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Capture running topologies for load gen later on.
+ */
+public class CaptureLoad {
+    private static final Logger LOG = LoggerFactory.getLogger(CaptureLoad.class);
+    public static final String DEFAULT_OUT_DIR = "./loadgen/";
+
+    private static List<Double> extractBoltValues(List<ExecutorSummary> summaries,
+                                                  GlobalStreamId id,
+                                                  Function<BoltStats, Map<String, Map<GlobalStreamId, Double>>> func) {
+
+        List<Double> ret = new ArrayList<>();
+        if (summaries != null) {
+            for (ExecutorSummary summ : summaries) {
+                if (summ != null && summ.is_set_stats()) {
+                    Map<String, Map<GlobalStreamId, Double>> data = func.apply(summ.get_stats().get_specific().get_bolt());
+                    if (data != null) {
+                        List<Double> subvalues = data.values().stream()
+                            .map((subMap) -> subMap.get(id))
+                            .filter((value) -> value != null)
+                            .collect(Collectors.toList());
+                        ret.addAll(subvalues);
+                    }
+                }
+            }
+        }
+        return ret;
+    }
+
+    static TopologyLoadConf captureTopology(Nimbus.Iface client, TopologySummary topologySummary) throws Exception {
+        String topologyName = topologySummary.get_name();
+        LOG.info("Capturing {}...", topologyName);
+        String topologyId = topologySummary.get_id();
+        TopologyInfo info = client.getTopologyInfo(topologyId);
+        TopologyPageInfo tpinfo = client.getTopologyPageInfo(topologyId, ":all-time", false);
+        @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance")
+        StormTopology topo = client.getUserTopology(topologyId);
+        //Done capturing topology information...
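+        //Everything below distills that raw information into a TopologyLoadConf: first the
+        // interesting conf values and worker count, then each component's structure and
+        // resources, and finally per-stream rate and latency stats.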
+
+        Map<String, Object> savedTopoConf = new HashMap<>();
+        Map<String, Object> topoConf = (Map<String, Object>) JSONValue.parse(client.getTopologyConf(topologyId));
+        for (String key: TopologyLoadConf.IMPORTANT_CONF_KEYS) {
+            Object o = topoConf.get(key);
+            if (o != null) {
+                savedTopoConf.put(key, o);
+                LOG.info("with config {}: {}", key, o);
+            }
+        }
+        //Let's use the number of actually scheduled workers as a way to bridge RAS and non-RAS
+        int numWorkers = tpinfo.get_num_workers();
+        if (savedTopoConf.containsKey(Config.TOPOLOGY_WORKERS)) {
+            numWorkers = Math.max(numWorkers, ((Number) savedTopoConf.get(Config.TOPOLOGY_WORKERS)).intValue());
+        }
+        savedTopoConf.put(Config.TOPOLOGY_WORKERS, numWorkers);
+
+        Map<String, LoadCompConf.Builder> boltBuilders = new HashMap<>();
+        Map<String, LoadCompConf.Builder> spoutBuilders = new HashMap<>();
+        List<InputStream.Builder> inputStreams = new ArrayList<>();
+        Map<GlobalStreamId, OutputStream.Builder> outStreams = new HashMap<>();
+
+        //Bolts
+        if (topo.get_bolts() != null) {
+            for (Map.Entry<String, Bolt> boltSpec : topo.get_bolts().entrySet()) {
+                String boltComp = boltSpec.getKey();
+                LOG.info("Found bolt {}...", boltComp);
+                Bolt bolt = boltSpec.getValue();
+                ComponentCommon common = bolt.get_common();
+                Map<GlobalStreamId, Grouping> inputs = common.get_inputs();
+                if (inputs != null) {
+                    for (Map.Entry<GlobalStreamId, Grouping> input : inputs.entrySet()) {
+                        GlobalStreamId id = input.getKey();
+                        LOG.info("with input {}...", id);
+                        Grouping grouping = input.getValue();
+                        InputStream.Builder builder = new InputStream.Builder()
+                            .withId(id.get_streamId())
+                            .withFromComponent(id.get_componentId())
+                            .withToComponent(boltComp)
+                            .withGroupingType(grouping);
+                        inputStreams.add(builder);
+                    }
+                }
+                Map<String, StreamInfo> outputs = common.get_streams();
+                if (outputs != null) {
+                    for (String name : outputs.keySet()) {
+                        GlobalStreamId id = new GlobalStreamId(boltComp, name);
+                        LOG.info("and output {}...", id);
+                        OutputStream.Builder builder = new OutputStream.Builder()
+                            .withId(name);
+                        outStreams.put(id, builder);
+                    }
+                }
+                LoadCompConf.Builder builder = new LoadCompConf.Builder()
+                    .withParallelism(common.get_parallelism_hint())
+                    .withId(boltComp);
+                boltBuilders.put(boltComp, builder);
+            }
+
+            Map<String, Map<String, Double>> boltResources = getBoltsResources(topo, topoConf);
+            for (Map.Entry<String, Map<String, Double>> entry: boltResources.entrySet()) {
+                LoadCompConf.Builder bd = boltBuilders.get(entry.getKey());
+                if (bd != null) {
+                    Map<String, Double> resources = entry.getValue();
+                    Double cpu = resources.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT);
+                    if (cpu != null) {
+                        bd.withCpuLoad(cpu);
+                    }
+                    Double mem = resources.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB);
+                    if (mem != null) {
+                        bd.withMemoryLoad(mem);
+                    }
+                }
+            }
+        }
+
+        //Spouts
+        if (topo.get_spouts() != null) {
+            for (Map.Entry<String, SpoutSpec> spoutSpec : topo.get_spouts().entrySet()) {
+                String spoutComp = spoutSpec.getKey();
+                LOG.info("Found Spout {}...", spoutComp);
+                SpoutSpec spout = spoutSpec.getValue();
+                ComponentCommon common = spout.get_common();
+
+                Map<String, StreamInfo> outputs = common.get_streams();
+                if (outputs != null) {
+                    for (String name : outputs.keySet()) {
+                        GlobalStreamId id = new GlobalStreamId(spoutComp, name);
+                        LOG.info("with output {}...", id);
+                        OutputStream.Builder builder = new OutputStream.Builder()
+                            .withId(name);
+                        outStreams.put(id, builder);
+                    }
+                }
+                LoadCompConf.Builder builder = new LoadCompConf.Builder()
+                    .withParallelism(common.get_parallelism_hint())
+                    .withId(spoutComp);
+                spoutBuilders.put(spoutComp, builder);
+            }
+
+            Map<String, Map<String, Double>> spoutResources = getSpoutsResources(topo, topoConf);
+            for (Map.Entry<String, Map<String, Double>> entry: spoutResources.entrySet()) {
+                LoadCompConf.Builder sd = spoutBuilders.get(entry.getKey());
+                if (sd != null) {
+                    Map<String, Double> resources = entry.getValue();
+                    Double cpu = resources.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT);
+                    if (cpu != null) {
+                        sd.withCpuLoad(cpu);
+                    }
+                    Double mem = resources.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB);
+                    if (mem != null) {
+                        sd.withMemoryLoad(mem);
+                    }
+                }
+            }
+        }
+
+        //Stats...
+        Map<String, List<ExecutorSummary>> byComponent = new HashMap<>();
+        for (ExecutorSummary executor: info.get_executors()) {
+            String component = executor.get_component_id();
+            List<ExecutorSummary> list = byComponent.get(component);
+            if (list == null) {
+                list = new ArrayList<>();
+                byComponent.put(component, list);
+            }
+            list.add(executor);
+        }
+
+        List<InputStream> streams = new ArrayList<>(inputStreams.size());
+        //Compute the stats for the different input streams
+        for (InputStream.Builder builder : inputStreams) {
+            GlobalStreamId streamId = new GlobalStreamId(builder.getFromComponent(), builder.getId());
+            List<ExecutorSummary> summaries = byComponent.get(builder.getToComponent());
+            //Execute and process latency...
+            builder.withProcessTime(new NormalDistStats(
+                extractBoltValues(summaries, streamId, BoltStats::get_process_ms_avg)));
+            builder.withExecTime(new NormalDistStats(
+                extractBoltValues(summaries, streamId, BoltStats::get_execute_ms_avg)));
+            //InputStream is done
+            streams.add(builder.build());
+        }
+
+        //There is a bug in some versions that returns 0 for the uptime.
+        // To work around it we should get it in an alternative (working) way.
+        Map<String, Integer> workerToUptime = new HashMap<>();
+        for (WorkerSummary ws : tpinfo.get_workers()) {
+            workerToUptime.put(ws.get_supervisor_id() + ":" + ws.get_port(), ws.get_uptime_secs());
+        }
+        LOG.debug("WORKER TO UPTIME {}", workerToUptime);
+
+        for (Map.Entry<GlobalStreamId, OutputStream.Builder> entry : outStreams.entrySet()) {
+            OutputStream.Builder builder = entry.getValue();
+            GlobalStreamId id = entry.getKey();
+            List<Double> emittedRate = new ArrayList<>();
+            List<ExecutorSummary> summaries = byComponent.get(id.get_componentId());
+            if (summaries != null) {
+                for (ExecutorSummary summary: summaries) {
+                    if (summary.is_set_stats()) {
+                        int uptime = summary.get_uptime_secs();
+                        LOG.debug("UPTIME {}", uptime);
+                        if (uptime <= 0) {
+                            //Likely it is because of a bug, so try to get it another way
+                            String key = summary.get_host() + ":" + summary.get_port();
+                            uptime = workerToUptime.getOrDefault(key, 1);
+                            LOG.debug("Getting uptime for worker {}, {}", key, uptime);
+                        }
+                        for (Map.Entry<String, Map<String, Long>> statEntry : summary.get_stats().get_emitted().entrySet()) {
+                            String timeWindow = statEntry.getKey();
+                            long timeSecs = uptime;
+                            try {
+                                timeSecs = Long.valueOf(timeWindow);
+                            } catch (NumberFormatException e) {
+                                //Ignored...
+                            }
+                            timeSecs = Math.min(timeSecs, uptime);
+                            Long count = statEntry.getValue().get(id.get_streamId());
+                            if (count != null) {
+                                LOG.debug("{} emitted {} for {} secs or {} tuples/sec",
+                                    id, count, timeSecs, count.doubleValue() / timeSecs);
+                                emittedRate.add(count.doubleValue() / timeSecs);
+                            }
+                        }
+                    }
+                }
+            }
+            builder.withRate(new NormalDistStats(emittedRate));
+
+            //The OutputStream is done
+            LoadCompConf.Builder comp = boltBuilders.get(id.get_componentId());
+            if (comp == null) {
+                comp = spoutBuilders.get(id.get_componentId());
+            }
+            comp.withStream(builder.build());
+        }
+
+        List<LoadCompConf> spouts = spoutBuilders.values().stream()
+            .map((b) -> b.build())
+            .collect(Collectors.toList());
+
+        List<LoadCompConf> bolts = boltBuilders.values().stream()
+            .map((b) -> b.build())
+            .collect(Collectors.toList());
+
+        return new TopologyLoadConf(topologyName, savedTopoConf, spouts, bolts, streams);
+    }
+
+    /**
+     * Main entry point for CaptureLoad command.
+     * @param args the arguments to the command
+     * @throws Exception on any error
+     */
+    public static void main(String[] args) throws Exception {
+        Options options = new Options();
+        options.addOption(Option.builder("a")
+            .longOpt("anonymize")
+            .desc("Strip out any possibly identifiable information")
+            .build());
+        options.addOption(Option.builder("o")
+            .longOpt("output-dir")
+            .argName("")
+            .hasArg()
+            .desc("Where to write (defaults to " + DEFAULT_OUT_DIR + ")")
+            .build());
+        options.addOption(Option.builder("h")
+            .longOpt("help")
+            .desc("Print a help message")
+            .build());
+        CommandLineParser parser = new DefaultParser();
+        CommandLine cmd = null;
+        boolean printHelp = false;
+        try {
+            cmd = parser.parse(options, args);
+        } catch (ParseException e) {
+            System.err.println("ERROR " + e.getMessage());
+            printHelp = true;
+        }
+        if (printHelp || cmd.hasOption('h')) {
+            new HelpFormatter().printHelp("CaptureLoad [options] [topologyName]*", options);
+            return;
+        }
+
+        Config conf = new Config();
+        int exitStatus = -1;
+        String outputDir = DEFAULT_OUT_DIR;
+        if (cmd.hasOption('o')) {
+            outputDir = cmd.getOptionValue('o');
+        }
+        File baseOut = new File(outputDir);
+        LOG.info("Will save captured topologies to {}", baseOut);
+        baseOut.mkdirs();
+
+        try (NimbusClient nc = NimbusClient.Builder.withConf(conf).build()) {
+            Nimbus.Iface client = nc.getClient();
+            List<String> topologyNames = cmd.getArgList();
+
+            for (TopologySummary topologySummary: client.getTopologySummaries()) {
+                if (topologyNames.isEmpty() || topologyNames.contains(topologySummary.get_name())) {
+                    TopologyLoadConf capturedConf = captureTopology(client, topologySummary);
+                    if (cmd.hasOption('a')) {
+                        capturedConf = capturedConf.anonymize();
+                    }
+                    capturedConf.writeTo(new File(baseOut, capturedConf.name + ".yaml"));
+                }
+            }
+
+            exitStatus = 0;
+        } catch (Exception e) {
+            LOG.error("Error trying to capture topologies...", e);
+        } finally {
+            System.exit(exitStatus);
+        }
+    }
+
+    //ResourceUtils.java is not available on the classpath to let us parse out the resources we want.
+    // So we have copied and pasted some of the needed methods here (with a few changes to logging).
+    static Map<String, Map<String, Double>> getBoltsResources(StormTopology topology, Map<String, Object> topologyConf) {
+        Map<String, Map<String, Double>> boltResources = new HashMap<>();
+        if (topology.get_bolts() != null) {
+            for (Map.Entry<String, Bolt> bolt : topology.get_bolts().entrySet()) {
+                Map<String, Double> topologyResources = parseResources(bolt.getValue().get_common().get_json_conf());
+                checkInitialization(topologyResources, bolt.getValue().toString(), topologyConf);
+                boltResources.put(bolt.getKey(), topologyResources);
+            }
+        }
+        return boltResources;
+    }
+
+    static Map<String, Map<String, Double>> getSpoutsResources(StormTopology topology, Map<String, Object> topologyConf) {
+        Map<String, Map<String, Double>> spoutResources = new HashMap<>();
+        if (topology.get_spouts() != null) {
+            for (Map.Entry<String, SpoutSpec> spout : topology.get_spouts().entrySet()) {
+                Map<String, Double> topologyResources = parseResources(spout.getValue().get_common().get_json_conf());
+                checkInitialization(topologyResources, spout.getValue().toString(), topologyConf);
+                spoutResources.put(spout.getKey(), topologyResources);
+            }
+        }
+        return spoutResources;
+    }
+
+    static Map<String, Double> parseResources(String input) {
+        Map<String, Double> topologyResources = new HashMap<>();
+        JSONParser parser = new JSONParser();
+        LOG.debug("Input to parseResources {}", input);
+        try {
+            if (input != null) {
+                Object obj = parser.parse(input);
+                JSONObject jsonObject = (JSONObject) obj;
+                if (jsonObject.containsKey(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB)) {
+                    Double topoMemOnHeap = ObjectReader
+                        .getDouble(jsonObject.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB), null);
+                    topologyResources.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, topoMemOnHeap);
+                }
+                if (jsonObject.containsKey(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB)) {
+                    Double topoMemOffHeap = ObjectReader
+                        .getDouble(jsonObject.get(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB), null);
+                    topologyResources.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, topoMemOffHeap);
+                }
+                if (jsonObject.containsKey(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT)) {
+                    Double topoCpu = ObjectReader.getDouble(jsonObject.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT),
+                        null);
+                    topologyResources.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, topoCpu);
+                }
+                LOG.debug("Topology Resources {}", topologyResources);
+            }
+        } catch (net.minidev.json.parser.ParseException e) {
+            LOG.error("Failed to parse component resources: " + e.toString(), e);
+            return null;
+        }
+        return topologyResources;
+    }
+
+    /**
+     * Checks if the topology's resource requirements are initialized.
+     * Will modify topologyResources by adding the appropriate defaults.
+     * @param topologyResources map of resource requirements
+     * @param componentId component for which initialization is being conducted
+     * @param topologyConf topology configuration
+     */
+    public static void checkInitialization(Map<String, Double> topologyResources, String componentId, Map<String, Object> topologyConf) {
+        StringBuilder msgBuilder = new StringBuilder();
+
+        for (String resourceName : topologyResources.keySet()) {
+            msgBuilder.append(checkInitResource(topologyResources, topologyConf, resourceName));
+        }
+
+        if (msgBuilder.length() > 0) {
+            String resourceDefaults = msgBuilder.toString();
+            LOG.debug(
+                "Unable to extract resource requirement for Component {} \n Resources : {}",
+                componentId, resourceDefaults);
+        }
+    }
+
+    private static String checkInitResource(Map<String, Double> topologyResources, Map<String, Object> topologyConf, String resourceName) {
+        StringBuilder msgBuilder = new StringBuilder();
+        if (topologyResources.containsKey(resourceName)) {
+            Double resourceValue = (Double) topologyConf.getOrDefault(resourceName, null);
+            if (resourceValue != null) {
+                topologyResources.put(resourceName, resourceValue);
+                msgBuilder.append(resourceName.substring(resourceName.lastIndexOf(".")) + " has been set to " + resourceValue);
+            }
+        }
+
+        return msgBuilder.toString();
+    }
+
+}
diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/EstimateThroughput.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/EstimateThroughput.java
new file mode 100644
index 00000000000..aee99bf225a
--- /dev/null
+++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/EstimateThroughput.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.loadgen;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.storm.Config;
+import org.apache.storm.generated.ClusterSummary;
+import org.apache.storm.generated.Nimbus;
+import org.apache.storm.generated.TopologySummary;
+import org.apache.storm.loadgen.CaptureLoad;
+import org.apache.storm.utils.NimbusClient;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Estimate the throughput of all topologies.
+ */
+public class EstimateThroughput {
+    private static final Logger LOG = LoggerFactory.getLogger(EstimateThroughput.class);
+
+    /**
+     * Main entry point for estimate throughput command.
+     * @param args the command line arguments.
+     * @throws Exception on any error.
+     */
+    public static void main(String[] args) throws Exception {
+        Options options = new Options();
+        options.addOption(Option.builder("h")
+            .longOpt("help")
+            .desc("Print a help message")
+            .build());
+        CommandLineParser parser = new DefaultParser();
+        CommandLine cmd = null;
+        boolean printHelp = false;
+        try {
+            cmd = parser.parse(options, args);
+        } catch (ParseException e) {
+            System.err.println("ERROR " + e.getMessage());
+            printHelp = true;
+        }
+        if (printHelp || cmd.hasOption('h')) {
+            new HelpFormatter().printHelp("EstimateThroughput [options] [topologyName]*", options);
+            return;
+        }
+
+        Config conf = new Config();
+        int exitStatus = -1;
+
+        List<TopologyLoadConf> regular = new ArrayList<>();
+        List<TopologyLoadConf> trident = new ArrayList<>();
+
+        try (NimbusClient nc = NimbusClient.Builder.withConf(conf).build()) {
+            Nimbus.Iface client = nc.getClient();
+            List<String> topologyNames = cmd.getArgList();
+
+            for (TopologySummary topologySummary: client.getTopologySummaries()) {
+                if (topologyNames.isEmpty() || topologyNames.contains(topologySummary.get_name())) {
+                    TopologyLoadConf capturedConf = CaptureLoad.captureTopology(client, topologySummary);
+                    if (capturedConf.looksLikeTrident()) {
+                        trident.add(capturedConf);
+                    } else {
+                        regular.add(capturedConf);
+                    }
+                }
+            }
+
+            System.out.println("TOPOLOGY\tTOTAL MESSAGES/sec\tESTIMATED INPUT MESSAGES/sec");
+            for (TopologyLoadConf tl: regular) {
+                System.out.println(tl.name + "\t" + tl.getAllEmittedAggregate() + "\t" + tl.getSpoutEmittedAggregate());
+            }
+            for (TopologyLoadConf tl: trident) {
+                System.out.println(tl.name + "\t" + tl.getAllEmittedAggregate() + "\t" + tl.getTridentEstimatedEmittedAggregate());
+            }
+            exitStatus = 0;
+        } catch (Exception e) {
+            LOG.error("Error trying to capture topologies...", e);
+        } finally {
+            System.exit(exitStatus);
+        }
+    }
+}
diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/ExecAndProcessLatencyEngine.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/ExecAndProcessLatencyEngine.java
new file mode 100644
index 00000000000..c2dd81f71a4
--- /dev/null
+++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/ExecAndProcessLatencyEngine.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.loadgen;
+
+import java.io.Serializable;
+import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.LockSupport;
+
+/**
+ * A more accurate sleep implementation.
+ */
+public class ExecAndProcessLatencyEngine implements Serializable {
+    private static final long NANO_IN_MS = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS);
+    private final SlowExecutorPattern skewedPattern;
+
+    public static long toNano(double ms) {
+        return (long) (ms * NANO_IN_MS);
+    }
+
+    private final AtomicLong parkOffset = new AtomicLong(0);
+    private Random rand;
+    private ScheduledExecutorService timer;
+
+    public ExecAndProcessLatencyEngine() {
+        this(null);
+    }
+
+    public ExecAndProcessLatencyEngine(SlowExecutorPattern skewedPattern) {
+        this.skewedPattern = skewedPattern;
+    }
+
+    public void prepare() {
+        this.rand = ThreadLocalRandom.current();
+        this.timer = Executors.newSingleThreadScheduledExecutor();
+    }
+
+    /**
+     * Sleep for a set number of nanoseconds.
+     * @param start the start time of the sleep
+     * @param sleepAmount how many nanoseconds after start we should stop.
+     */
+    public void sleepNano(long start, long sleepAmount) {
+        long endTime = start + sleepAmount;
+        // A small control algorithm to adjust the amount of time that we sleep to make it more accurate
+        long newEnd = endTime - parkOffset.get();
+        long diff = newEnd - start;
+        //There are some different levels of accuracy here, and we want to deal with all of them
+        if (diff <= 1_000) {
+            //We are done, nothing that short is going to work here
+        } else if (diff < NANO_IN_MS) {
+            //Busy wait...
+            long sum = 0;
+            while (System.nanoTime() < newEnd) {
+                for (long i = 0; i < 1_000_000; i++) {
+                    sum += i; //busy work so the wait loop is not optimized away
+                }
+            }
+        } else {
+            //More accurate than Thread.sleep, but still not great
+            LockSupport.parkNanos(newEnd - System.nanoTime());
+        }
+        parkOffset.addAndGet((System.nanoTime() - endTime) / 2);
+    }
+
+    public void sleepNano(long nano) {
+        sleepNano(System.nanoTime(), nano);
+    }
+
+    public void sleepUntilNano(long endTime) {
+        long start = System.nanoTime();
+        sleepNano(start, endTime - start);
+    }
+
+    /**
+     * Simulate both process and exec times.
+     * @param executorIndex the index of this executor. It is used to skew the latencies.
+     * @param startTimeNs when the executor started in nanoseconds.
+     * @param in the metrics for the input stream (or null if you don't want to use them).
+     * @param r what to run when the process latency is up. Note that this may run on a separate thread after this method call has
+     *     completed.
+     */
+    public void simulateProcessAndExecTime(int executorIndex, long startTimeNs, InputStream in, Runnable r) {
+        long extraTimeNs = skewedPattern == null ? 0 : toNano(skewedPattern.getExtraSlowness(executorIndex));
+        long endExecNs = startTimeNs + extraTimeNs + (in == null ? 0 : ExecAndProcessLatencyEngine.toNano(in.execTime.nextRandom(rand)));
+        long endProcNs = startTimeNs + extraTimeNs + (in == null ? 0 : ExecAndProcessLatencyEngine.toNano(in.processTime.nextRandom(rand)));
+
+        if ((endProcNs - 1_000_000) < endExecNs) {
+            sleepUntilNano(endProcNs);
+            r.run();
+        } else {
+            timer.schedule(() -> {
+                r.run();
+            }, Math.max(0, endProcNs - System.nanoTime()), TimeUnit.NANOSECONDS);
+        }
+
+        sleepUntilNano(endExecNs);
+    }
+}
diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/GenLoad.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/GenLoad.java
new file mode 100644
index 00000000000..8ca16eadece
--- /dev/null
+++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/GenLoad.java
@@ -0,0 +1,332 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.loadgen;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.AlreadyAliveException;
+import org.apache.storm.generated.AuthorizationException;
+import org.apache.storm.generated.InvalidTopologyException;
+import org.apache.storm.metric.LoggingMetricsConsumer;
+import org.apache.storm.topology.BoltDeclarer;
+import org.apache.storm.topology.SpoutDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.utils.NimbusClient;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Generate a simulated load.
+ */
+public class GenLoad {
+    private static final Logger LOG = LoggerFactory.getLogger(GenLoad.class);
+    private static final int TEST_EXECUTE_TIME_DEFAULT = 5;
+    private static final Pattern MULTI_PATTERN = Pattern.compile(
+        "(?<value>[^:?]+)(?::(?<topo>[^:]*):(?<comp>.*))?");
+
+    /**
+     * Main entry point for GenLoad application.
+     * @param args the command line args.
+     * @throws Exception on any error.
+     */
+    public static void main(String[] args) throws Exception {
+        Options options = new Options();
+        options.addOption(Option.builder("h")
+            .longOpt("help")
+            .desc("Print a help message")
+            .build());
+        options.addOption(Option.builder("t")
+            .longOpt("test-time")
+            .argName("MINS")
+            .hasArg()
+            .desc("How long to run the tests for in mins (defaults to " + TEST_EXECUTE_TIME_DEFAULT + ")")
+            .build());
+        options.addOption(Option.builder()
+            .longOpt("parallel")
+            .argName("MULTIPLIER(:TOPO:COMP)?")
+            .hasArg()
+            .desc("How much to scale the topology up or down in parallelism. "
+                + "The new parallelism will round up to the next whole number. "
+                + "If a topology + component is supplied only that component will be scaled. "
+                + "If topo or component is blank or a '*' all topologies or components matched will be scaled. "
+                + "Only 1 scaling rule, the most specific, will be applied to a component. Providing a topology name is considered more "
+                + "specific than not providing one. "
+                + "(defaults to 1.0, no scaling)")
+            .build());
+        options.addOption(Option.builder()
+            .longOpt("throughput")
+            .argName("MULTIPLIER(:TOPO:COMP)?")
+            .hasArg()
+            .desc("How much to scale the topology up or down in throughput. "
" + + "If a topology + component is supplied only that component will be scaled. " + + "If topo or component is blank or a '*' all topologies or components matched will be scaled. " + + "Only 1 scaling rule, the most specific, will be applied to a component. Providing a topology name is considered more " + + "specific than not providing one." + + "(defaults to 1.0 no scaling)") + .build()); + options.addOption(Option.builder() + .longOpt("local-or-shuffle") + .desc("replace shuffle grouping with local or shuffle grouping") + .build()); + options.addOption(Option.builder() + .longOpt("imbalance") + .argName("MS(:COUNT)?:TOPO:COMP") + .hasArg() + .desc("The number of ms that the first COUNT of TOPO:COMP will wait before processing. This creates an imbalance " + + "that helps test load aware groupings. By default there is no imbalance. If no count is given it defaults to 1") + .build()); + options.addOption(Option.builder() + .longOpt("debug") + .desc("Print debug information about the adjusted topology before submitting it.") + .build()); + LoadMetricsServer.addCommandLineOptions(options); + CommandLineParser parser = new DefaultParser(); + CommandLine cmd = null; + Exception commandLineException = null; + double executeTime = TEST_EXECUTE_TIME_DEFAULT; + double globalParallel = 1.0; + Map topoSpecificParallel = new HashMap<>(); + double globalThroughput = 1.0; + Map topoSpecificThroughput = new HashMap<>(); + Map topoSpecificImbalance = new HashMap<>(); + try { + cmd = parser.parse(options, args); + if (cmd.hasOption("t")) { + executeTime = Double.valueOf(cmd.getOptionValue("t")); + } + if (cmd.hasOption("parallel")) { + for (String stringParallel : cmd.getOptionValues("parallel")) { + Matcher m = MULTI_PATTERN.matcher(stringParallel); + if (!m.matches()) { + throw new ParseException("--parallel " + stringParallel + " is not in the format MULTIPLIER(:TOPO:COMP)?"); + } + double parallel = Double.parseDouble(m.group("value")); + String topo = m.group("topo"); + if (topo == null || topo.isEmpty()) { + topo = "*"; + } + String comp = m.group("comp"); + if (comp == null || comp.isEmpty()) { + comp = "*"; + } + if ("*".equals(topo) && "*".equals(comp)) { + globalParallel = parallel; + } else { + topoSpecificParallel.put(topo + ":" + comp, parallel); + } + } + } + if (cmd.hasOption("throughput")) { + for (String stringThroughput : cmd.getOptionValues("throughput")) { + Matcher m = MULTI_PATTERN.matcher(stringThroughput); + if (!m.matches()) { + throw new ParseException("--throughput " + stringThroughput + " is not in the format MULTIPLIER(:TOPO:COMP)?"); + } + double throughput = Double.parseDouble(m.group("value")); + String topo = m.group("topo"); + if (topo == null || topo.isEmpty()) { + topo = "*"; + } + String comp = m.group("comp"); + if (comp == null || comp.isEmpty()) { + comp = "*"; + } + if ("*".equals(topo) && "*".equals(comp)) { + globalThroughput = throughput; + } else { + topoSpecificThroughput.put(topo + ":" + comp, throughput); + } + } + } + if (cmd.hasOption("imbalance")) { + for (String stringImbalance : cmd.getOptionValues("imbalance")) { + //We require there to be both a topology and a component in this case, so parse it out as such. 
+                    String[] parts = stringImbalance.split(":");
+                    if (parts.length < 3 || parts.length > 4) {
+                        throw new ParseException(stringImbalance + " does not appear to match the expected pattern");
+                    } else if (parts.length == 3) {
+                        topoSpecificImbalance.put(parts[1] + ":" + parts[2], SlowExecutorPattern.fromString(parts[0]));
+                    } else { //== 4
+                        topoSpecificImbalance.put(parts[2] + ":" + parts[3],
+                            SlowExecutorPattern.fromString(parts[0] + ":" + parts[1]));
+                    }
+                }
+            }
+        } catch (ParseException | NumberFormatException e) {
+            commandLineException = e;
+        }
+        if (commandLineException != null || cmd.hasOption('h')) {
+            if (commandLineException != null) {
+                System.err.println("ERROR " + commandLineException.getMessage());
+            }
+            new HelpFormatter().printHelp("GenLoad [options] [captured_file]*", options);
+            return;
+        }
+        Map<String, Object> metrics = new LinkedHashMap<>();
+        metrics.put("parallel_adjust", globalParallel);
+        metrics.put("throughput_adjust", globalThroughput);
+        metrics.put("local_or_shuffle", cmd.hasOption("local-or-shuffle"));
+        metrics.put("topo_parallel", topoSpecificParallel.entrySet().stream().map((entry) -> entry.getValue() + ":" + entry.getKey())
+            .collect(Collectors.toList()));
+        metrics.put("topo_throughput", topoSpecificThroughput.entrySet().stream().map((entry) -> entry.getValue() + ":" + entry.getKey())
+            .collect(Collectors.toList()));
+        metrics.put("slow_execs", topoSpecificImbalance.entrySet().stream().map((entry) -> entry.getValue() + ":" + entry.getKey())
+            .collect(Collectors.toList()));
+
+        Config conf = new Config();
+        LoadMetricsServer metricServer = new LoadMetricsServer(conf, cmd, metrics);
+
+        metricServer.serve();
+        String url = metricServer.getUrl();
+        int exitStatus = -1;
+        try (NimbusClient client = NimbusClient.Builder.withConf(conf).build();
+             ScopedTopologySet topoNames = new ScopedTopologySet(client.getClient())) {
+            for (String topoFile : cmd.getArgList()) {
+                try {
+                    TopologyLoadConf tlc = readTopology(topoFile);
+                    tlc = tlc.scaleParallel(globalParallel, topoSpecificParallel);
+                    tlc = tlc.scaleThroughput(globalThroughput, topoSpecificThroughput);
+                    tlc = tlc.overrideSlowExecs(topoSpecificImbalance);
+                    if (cmd.hasOption("local-or-shuffle")) {
+                        tlc = tlc.replaceShuffleWithLocalOrShuffle();
+                    }
+                    if (cmd.hasOption("debug")) {
+                        LOG.info("DEBUGGING: {}", tlc.toYamlString());
+                    }
+                    topoNames.add(parseAndSubmit(tlc, url));
+                } catch (Exception e) {
+                    System.err.println("Could not submit topology from " + topoFile);
+                    e.printStackTrace(System.err);
+                }
+            }
+
+            metricServer.monitorFor(executeTime, client.getClient(), topoNames);
+            exitStatus = 0;
+        } catch (Exception e) {
+            LOG.error("Error trying to run topologies...", e);
+        } finally {
+            System.exit(exitStatus);
+        }
+    }
+
+    private static TopologyLoadConf readTopology(String topoFile) throws IOException {
+        File f = new File(topoFile);
+
+        TopologyLoadConf tlc = TopologyLoadConf.fromConf(f);
+        if (tlc.name == null) {
+            String fileName = f.getName();
+            int dot = fileName.lastIndexOf('.');
+            final String baseName = fileName.substring(0, dot);
+            tlc = tlc.withName(baseName);
+        }
+        return tlc;
+    }
+
+    private static int uniquifier = 0;
+
+    private static String parseAndSubmit(TopologyLoadConf tlc, String url) throws IOException, InvalidTopologyException,
+        AuthorizationException, AlreadyAliveException {
+
+        //First we need some configs
+        Config conf = new Config();
+        if (tlc.topoConf != null) {
+            conf.putAll(tlc.topoConf);
+        }
+        //For some reason on the new code, a null acker count can end up as 0, so default it explicitly.
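+        //e.g. with TOPOLOGY_WORKERS=4 and no acker setting, this submits with TOPOLOGY_ACKER_EXECUTORS=4.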
+        Object ackers = conf.get(Config.TOPOLOGY_ACKER_EXECUTORS);
+        Object workers = conf.get(Config.TOPOLOGY_WORKERS);
+        if (ackers == null || ((Number) ackers).intValue() <= 0) {
+            if (workers == null) {
+                workers = 1;
+            }
+            conf.put(Config.TOPOLOGY_ACKER_EXECUTORS, workers);
+        }
+        conf.registerMetricsConsumer(LoggingMetricsConsumer.class);
+        conf.registerMetricsConsumer(HttpForwardingMetricsConsumer.class, url, 1);
+        Map<String, String> workerMetrics = new HashMap<>();
+        if (!NimbusClient.isLocalOverride()) {
+            //sigar uses JNI and does not work in local mode
+            workerMetrics.put("CPU", "org.apache.storm.metrics.sigar.CPUMetric");
+        }
+        conf.put(Config.TOPOLOGY_WORKER_METRICS, workerMetrics);
+        conf.put(Config.TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS, 10);
+
+        //Let's build a topology.
+        TopologyBuilder builder = new TopologyBuilder();
+        for (LoadCompConf spoutConf : tlc.spouts) {
+            System.out.println("ADDING SPOUT " + spoutConf.id);
+            SpoutDeclarer sd = builder.setSpout(spoutConf.id, new LoadSpout(spoutConf), spoutConf.parallelism);
+            if (spoutConf.memoryLoad > 0) {
+                sd.setMemoryLoad(spoutConf.memoryLoad);
+            }
+            if (spoutConf.cpuLoad > 0) {
+                sd.setCPULoad(spoutConf.cpuLoad);
+            }
+        }
+
+        Map<String, BoltDeclarer> boltDeclarers = new HashMap<>();
+        Map<String, LoadBolt> bolts = new HashMap<>();
+        if (tlc.bolts != null) {
+            for (LoadCompConf boltConf : tlc.bolts) {
+                System.out.println("ADDING BOLT " + boltConf.id);
+                LoadBolt lb = new LoadBolt(boltConf);
+                bolts.put(boltConf.id, lb);
+                BoltDeclarer bd = builder.setBolt(boltConf.id, lb, boltConf.parallelism);
+                if (boltConf.memoryLoad > 0) {
+                    bd.setMemoryLoad(boltConf.memoryLoad);
+                }
+                if (boltConf.cpuLoad > 0) {
+                    bd.setCPULoad(boltConf.cpuLoad);
+                }
+                boltDeclarers.put(boltConf.id, bd);
+            }
+        }
+
+        if (tlc.streams != null) {
+            for (InputStream in : tlc.streams) {
+                BoltDeclarer declarer = boltDeclarers.get(in.toComponent);
+                if (declarer == null) {
+                    throw new IllegalArgumentException("to bolt " + in.toComponent + " does not exist");
+                }
+                LoadBolt lb = bolts.get(in.toComponent);
+                lb.add(in);
+                in.groupingType.assign(declarer, in);
+            }
+        }
+
+        String topoName = tlc.name + "-" + uniquifier++;
+        StormSubmitter.submitTopology(topoName, conf, builder.createTopology());
+        return topoName;
+    }
+}
diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/GroupingType.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/GroupingType.java
new file mode 100644
index 00000000000..a4e0c1af7b5
--- /dev/null
+++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/GroupingType.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.loadgen;
+
+import java.util.Locale;
+import org.apache.storm.topology.BoltDeclarer;
+import org.apache.storm.tuple.Fields;
+
+/**
+ * The different types of groupings that are supported.
+ */
+public enum GroupingType {
+    SHUFFLE {
+        @Override
+        public void assign(BoltDeclarer declarer, InputStream stream) {
+            declarer.shuffleGrouping(stream.fromComponent, stream.id);
+        }
+    },
+    FIELDS {
+        @Override
+        public void assign(BoltDeclarer declarer, InputStream stream) {
+            declarer.fieldsGrouping(stream.fromComponent, stream.id, new Fields("key"));
+        }
+    },
+    ALL {
+        @Override
+        public void assign(BoltDeclarer declarer, InputStream stream) {
+            declarer.allGrouping(stream.fromComponent, stream.id);
+        }
+    },
+    GLOBAL {
+        @Override
+        public void assign(BoltDeclarer declarer, InputStream stream) {
+            declarer.globalGrouping(stream.fromComponent, stream.id);
+        }
+    },
+    LOCAL_OR_SHUFFLE {
+        @Override
+        public void assign(BoltDeclarer declarer, InputStream stream) {
+            declarer.localOrShuffleGrouping(stream.fromComponent, stream.id);
+        }
+    },
+    NONE {
+        @Override
+        public void assign(BoltDeclarer declarer, InputStream stream) {
+            declarer.noneGrouping(stream.fromComponent, stream.id);
+        }
+    },
+    PARTIAL_KEY {
+        @Override
+        public void assign(BoltDeclarer declarer, InputStream stream) {
+            declarer.partialKeyGrouping(stream.fromComponent, stream.id, new Fields("key"));
+        }
+    };
+
+    /**
+     * Parse a String config value and convert it into the enum.
+     * @param conf the string config.
+     * @return the parsed grouping type or SHUFFLE if conf is null.
+     * @throws IllegalArgumentException if parsing does not work.
+     */
+    public static GroupingType fromConf(String conf) {
+        String gt = "SHUFFLE";
+        if (conf != null) {
+            gt = conf.toUpperCase(Locale.ENGLISH);
+        }
+        return GroupingType.valueOf(gt);
+    }
+
+    public String toConf() {
+        return toString();
+    }
+
+    public abstract void assign(BoltDeclarer declarer, InputStream stream);
+}
diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/HttpForwardingMetricsConsumer.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/HttpForwardingMetricsConsumer.java
new file mode 100644
index 00000000000..f316c76300b
--- /dev/null
+++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/HttpForwardingMetricsConsumer.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.storm.loadgen; + +import com.esotericsoftware.kryo.io.Output; +import java.net.HttpURLConnection; +import java.net.URL; +import java.util.Arrays; +import java.util.Collection; +import java.util.Map; +import org.apache.storm.metric.api.IMetricsConsumer; +import org.apache.storm.serialization.KryoValuesSerializer; +import org.apache.storm.task.IErrorReporter; +import org.apache.storm.task.TopologyContext; + +/** + * Listens for all metrics and POSTs them serialized to a configured URL. + * + *
<p>
To use, add this to your topology's configuration: + * ```java + * conf.registerMetricsConsumer(HttpForwardingMetricsConsumer.class, "http://example.com:8080/metrics/my-topology/", 1); + * ``` + * + *
<p>
The body of the post is data serialized using {@link org.apache.storm.serialization.KryoValuesSerializer}, with the data passed in + * as a list of `[TaskInfo, Collection<DataPoint>]`. More things may be appended to the end of the list in the future. + * + *
<p>
The values can be deserialized using the org.apache.storm.serialization.KryoValuesDeserializer, and a correct config + classpath. + * + *
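+ * <p>For example, the receiving side can decode the POST body like this (a sketch;
+ * {@code conf} here is a storm config map and {@code body} the raw bytes of the request):
+ * ```java
+ * KryoValuesDeserializer des = new KryoValuesDeserializer(conf);
+ * List<Object> data = des.deserializeFrom(new Input(body));
+ * TaskInfo info = (TaskInfo) data.get(0);
+ * ```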

@see org.apache.storm.serialization.KryoValuesSerializer + */ +public class HttpForwardingMetricsConsumer implements IMetricsConsumer { + private transient URL url; + private transient IErrorReporter errorReporter; + private transient KryoValuesSerializer serializer; + private transient String topologyId; + + @Override + public void prepare(Map topoConf, Object registrationArgument, TopologyContext context, IErrorReporter errorReporter) { + try { + url = new URL((String) registrationArgument); + this.errorReporter = errorReporter; + serializer = new KryoValuesSerializer(topoConf); + topologyId = context.getStormId(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + @Override + public void handleDataPoints(TaskInfo taskInfo, Collection dataPoints) { + try { + HttpURLConnection con = (HttpURLConnection) url.openConnection(); + con.setRequestMethod("POST"); + con.setDoOutput(true); + try (Output out = new Output(con.getOutputStream())) { + serializer.serializeInto(Arrays.asList(taskInfo, dataPoints, topologyId), out); + out.flush(); + } + //The connection is not sent unless a response is requested + int response = con.getResponseCode(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + @Override + public void cleanup() { } +} diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/HttpForwardingMetricsServer.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/HttpForwardingMetricsServer.java new file mode 100644 index 00000000000..74317137fe3 --- /dev/null +++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/HttpForwardingMetricsServer.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.loadgen; + +import com.esotericsoftware.kryo.io.Input; +import jakarta.servlet.ServletException; +import jakarta.servlet.http.HttpServlet; +import jakarta.servlet.http.HttpServletRequest; +import jakarta.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.net.InetAddress; +import java.net.ServerSocket; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import org.apache.storm.metric.api.IMetricsConsumer.DataPoint; +import org.apache.storm.metric.api.IMetricsConsumer.TaskInfo; +import org.apache.storm.serialization.KryoValuesDeserializer; +import org.apache.storm.utils.Utils; +import org.eclipse.jetty.ee10.servlet.ServletContextHandler; +import org.eclipse.jetty.ee10.servlet.ServletHolder; +import org.eclipse.jetty.server.Server; + +/** + * A server that can listen for metrics from the HttpForwardingMetricsConsumer. 
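+ *
+ * <p>A minimal concrete server is a sketch like this (the {@code handle} implementation
+ * must be thread safe; {@code conf} is an optional storm config override):
+ * ```java
+ * HttpForwardingMetricsServer server = new HttpForwardingMetricsServer(conf) {
+ *     @Override
+ *     public void handle(TaskInfo taskInfo, Collection<DataPoint> dataPoints, String topologyId) {
+ *         //record or aggregate the data points here
+ *     }
+ * };
+ * server.serve(); //a null port picks a free ephemeral port; see getUrl() for the resulting URL
+ * ```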
+ */
+public abstract class HttpForwardingMetricsServer {
+    private Map<String, Object> conf;
+    private Server server = null;
+    private int port = -1;
+    private String url = null;
+
+    ThreadLocal<KryoValuesDeserializer> des = new ThreadLocal<KryoValuesDeserializer>() {
+        @Override
+        protected KryoValuesDeserializer initialValue() {
+            return new KryoValuesDeserializer(conf);
+        }
+    };
+
+    private class MetricsCollectionServlet extends HttpServlet {
+        @Override
+        protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
+            Input in = new Input(request.getInputStream());
+            List<Object> metrics = des.get().deserializeFrom(in);
+            handle((TaskInfo) metrics.get(0), (Collection<DataPoint>) metrics.get(1), (String) metrics.get(2));
+            response.setStatus(HttpServletResponse.SC_OK);
+        }
+    }
+
+    /**
+     * Constructor.
+     * @param conf the configuration for storm.
+     */
+    public HttpForwardingMetricsServer(Map<String, Object> conf) {
+        this.conf = Utils.readStormConfig();
+        if (conf != null) {
+            this.conf.putAll(conf);
+        }
+    }
+
+    //This needs to be thread safe
+    public abstract void handle(TaskInfo taskInfo, Collection<DataPoint> dataPoints, String topologyId);
+
+    /**
+     * Start the server.
+     * @param port the port it should listen on, or null/<= 0 to pick a free ephemeral port.
+     */
+    public void serve(Integer port) {
+        try {
+            if (server != null) {
+                throw new RuntimeException("The server is already running");
+            }
+
+            if (port == null || port <= 0) {
+                ServerSocket s = new ServerSocket(0);
+                port = s.getLocalPort();
+                s.close();
+            }
+            server = new Server(port);
+            this.port = port;
+            url = "http://" + InetAddress.getLocalHost().getHostName() + ":" + this.port + "/";
+
+            ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS);
+            context.setContextPath("/");
+            server.setHandler(context);
+
+            context.addServlet(new ServletHolder(new MetricsCollectionServlet()), "/*");
+
+            server.start();
+        } catch (RuntimeException e) {
+            throw e;
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public void serve() {
+        serve(null);
+    }
+
+    public int getPort() {
+        return port;
+    }
+
+    public String getUrl() {
+        return url;
+    }
+}
diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/InputStream.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/InputStream.java
new file mode 100644
index 00000000000..19802d9e0fa
--- /dev/null
+++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/InputStream.java
@@ -0,0 +1,263 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.loadgen;
+
+import java.io.ByteArrayInputStream;
+import java.io.ObjectInputStream;
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.storm.generated.GlobalStreamId;
+import org.apache.storm.generated.Grouping;
+import org.apache.storm.grouping.PartialKeyGrouping;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A set of measurements about a stream so we can statistically reproduce it.
+ */
+public class InputStream implements Serializable {
+    private static final Logger LOG = LoggerFactory.getLogger(InputStream.class);
+    public final String fromComponent;
+    public final String toComponent;
+    public final String id;
+    public final NormalDistStats execTime;
+    public final NormalDistStats processTime;
+    public final GroupingType groupingType;
+    //Cached GlobalStreamId
+    private GlobalStreamId gsid = null;
+
+    /**
+     * Create an input stream from a config.
+     * @param conf the config to read from.
+     * @return the read InputStream.
+     */
+    public static InputStream fromConf(Map<String, Object> conf) {
+        String component = (String) conf.get("from");
+        String toComp = (String) conf.get("to");
+        NormalDistStats execTime = NormalDistStats.fromConf((Map<String, Object>) conf.get("execTime"));
+        NormalDistStats processTime = NormalDistStats.fromConf((Map<String, Object>) conf.get("processTime"));
+        Map<String, Object> grouping = (Map<String, Object>) conf.get("grouping");
+        GroupingType groupingType = GroupingType.fromConf((String) grouping.get("type"));
+        String streamId = (String) grouping.getOrDefault("streamId", "default");
+        return new InputStream(component, toComp, streamId, execTime, processTime, groupingType);
+    }
+
+    /**
+     * Convert this to a conf.
+     * @return the conf.
+     */
+    public Map<String, Object> toConf() {
+        Map<String, Object> ret = new HashMap<>();
+        ret.put("from", fromComponent);
+        ret.put("to", toComponent);
+        ret.put("execTime", execTime.toConf());
+        ret.put("processTime", processTime.toConf());
+
+        Map<String, Object> grouping = new HashMap<>();
+        grouping.put("streamId", id);
+        grouping.put("type", groupingType.toConf());
+        ret.put("grouping", grouping);
+
+        return ret;
+    }
+
+    public static class Builder {
+        private String fromComponent;
+        private String toComponent;
+        private String id;
+        private NormalDistStats execTime;
+        private NormalDistStats processTime;
+        private GroupingType groupingType = GroupingType.SHUFFLE;
+
+        public String getFromComponent() {
+            return fromComponent;
+        }
+
+        public Builder withFromComponent(String fromComponent) {
+            this.fromComponent = fromComponent;
+            return this;
+        }
+
+        public String getToComponent() {
+            return toComponent;
+        }
+
+        public Builder withToComponent(String toComponent) {
+            this.toComponent = toComponent;
+            return this;
+        }
+
+        public String getId() {
+            return id;
+        }
+
+        public Builder withId(String id) {
+            this.id = id;
+            return this;
+        }
+
+        public NormalDistStats getExecTime() {
+            return execTime;
+        }
+
+        public Builder withExecTime(NormalDistStats execTime) {
+            this.execTime = execTime;
+            return this;
+        }
+
+        public NormalDistStats getProcessTime() {
+            return processTime;
+        }
+
+        public Builder withProcessTime(NormalDistStats processTime) {
+            this.processTime = processTime;
+            return this;
+        }
+
+        public GroupingType getGroupingType() {
+            return groupingType;
+        }
+
+        public Builder withGroupingType(GroupingType groupingType) {
+            this.groupingType = groupingType;
+            return this;
+        }
+
+        /**
+         * Add the grouping type based on the thrift Grouping class.
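+         *
+         * <p>e.g. a FIELDS grouping with an empty field list maps to GLOBAL, and an unrecognized
+         * custom serialized grouping falls back to SHUFFLE (with a warning).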
+         * @param grouping the Grouping to extract the grouping type from
+         * @return this
+         */
+        @SuppressWarnings("checkstyle:FallThrough")
+        public Builder withGroupingType(Grouping grouping) {
+            GroupingType group = GroupingType.SHUFFLE;
+            Grouping._Fields thriftType = grouping.getSetField();
+
+            switch (thriftType) {
+                case FIELDS:
+                    //Global Grouping is fields with an empty list
+                    if (grouping.get_fields().isEmpty()) {
+                        group = GroupingType.GLOBAL;
+                    } else {
+                        group = GroupingType.FIELDS;
+                    }
+                    break;
+                case ALL:
+                    group = GroupingType.ALL;
+                    break;
+                case NONE:
+                    group = GroupingType.NONE;
+                    break;
+                case SHUFFLE:
+                    group = GroupingType.SHUFFLE;
+                    break;
+                case LOCAL_OR_SHUFFLE:
+                    group = GroupingType.LOCAL_OR_SHUFFLE;
+                    break;
+                case CUSTOM_SERIALIZED:
+                    //This might be a partial key grouping..
+                    byte[] data = grouping.get_custom_serialized();
+                    try (ByteArrayInputStream bis = new ByteArrayInputStream(data);
+                         ObjectInputStream ois = new ObjectInputStream(bis)) {
+                        Object cg = ois.readObject();
+                        if (cg instanceof PartialKeyGrouping) {
+                            group = GroupingType.PARTIAL_KEY;
+                            break;
+                        }
+                    } catch (Exception e) {
+                        //ignored
+                    }
+                    //Fall through if not supported
+                default:
+                    LOG.warn("{} is not supported for replay of a topology. Using SHUFFLE", thriftType);
+                    break;
+            }
+            return withGroupingType(group);
+        }
+
+        public InputStream build() {
+            return new InputStream(fromComponent, toComponent, id, execTime, processTime, groupingType);
+        }
+    }
+
+    /**
+     * Create a new input stream to a bolt.
+     * @param fromComponent the source component of the stream.
+     * @param toComponent the destination component of the stream.
+     * @param id the id of the stream
+     * @param execTime exec time stats
+     * @param processTime process time stats
+     * @param groupingType the grouping used for the stream
+     */
+    public InputStream(String fromComponent, String toComponent, String id, NormalDistStats execTime,
+                       NormalDistStats processTime, GroupingType groupingType) {
+        this.fromComponent = fromComponent;
+        this.toComponent = toComponent;
+        if (fromComponent == null) {
+            throw new IllegalArgumentException("from cannot be null");
+        }
+        if (toComponent == null) {
+            throw new IllegalArgumentException("to cannot be null");
+        }
+        this.id = id;
+        if (id == null) {
+            throw new IllegalArgumentException("id cannot be null");
+        }
+        this.execTime = execTime;
+        this.processTime = processTime;
+        this.groupingType = groupingType;
+        if (groupingType == null) {
+            throw new IllegalArgumentException("grouping type cannot be null");
+        }
+    }
+
+    /**
+     * Get the global stream id for this input stream.
+     * @return the GlobalStreamId for this input stream.
+     */
+    public synchronized GlobalStreamId gsid() {
+        if (gsid == null) {
+            gsid = new GlobalStreamId(fromComponent, id);
+        }
+        return gsid;
+    }
+
+    /**
+     * Remap the names of components.
+     * @param remappedComponents old name to new name of components.
+     * @param remappedStreams old ID to new ID of streams.
+     * @return a modified version of this with names remapped.
+     */
+    public InputStream remap(Map<String, String> remappedComponents, Map<GlobalStreamId, GlobalStreamId> remappedStreams) {
+        String remapTo = remappedComponents.get(toComponent);
+        String remapFrom = remappedComponents.get(fromComponent);
+        GlobalStreamId remapStreamId = remappedStreams.get(gsid());
+        return new InputStream(remapFrom, remapTo, remapStreamId.get_streamId(), execTime, processTime, groupingType);
+    }
+
+    /**
+     * Replace all SHUFFLE groupings with LOCAL_OR_SHUFFLE.
+     * @return a modified copy of this
+     */
+    public InputStream replaceShuffleWithLocalOrShuffle() {
+        if (groupingType != GroupingType.SHUFFLE) {
+            return this;
+        }
+        return new InputStream(fromComponent, toComponent, id, execTime, processTime, GroupingType.LOCAL_OR_SHUFFLE);
+    }
+}
diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadBolt.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadBolt.java
new file mode 100644
index 00000000000..7eb2b73fdc2
--- /dev/null
+++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadBolt.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.loadgen;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import org.apache.storm.generated.GlobalStreamId;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A bolt that simulates a real-world bolt, based on statistics captured from one.
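+ *
+ * <p>A wiring sketch (this mirrors what GenLoad does when it builds a topology):
+ * ```java
+ * LoadBolt lb = new LoadBolt(boltConf);
+ * builder.setBolt(boltConf.id, lb, boltConf.parallelism);
+ * lb.add(inputStream); //register the captured stats for each incoming stream
+ * ```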
+ */
+public class LoadBolt extends BaseRichBolt {
+    private static final Logger LOG = LoggerFactory.getLogger(LoadBolt.class);
+    private final List<OutputStream> outputStreamStats;
+    private List<OutputStreamEngine> outputStreams;
+    private final Map<GlobalStreamId, InputStream> inputStreams = new HashMap<>();
+    private OutputCollector collector;
+    private final ExecAndProcessLatencyEngine sleep;
+    private int executorIndex;
+
+    public LoadBolt(LoadCompConf conf) {
+        this.outputStreamStats = Collections.unmodifiableList(new ArrayList<>(conf.streams));
+        sleep = new ExecAndProcessLatencyEngine(conf.slp);
+    }
+
+    public void add(InputStream inputStream) {
+        GlobalStreamId id = inputStream.gsid();
+        inputStreams.put(id, inputStream);
+    }
+
+    @Override
+    public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
+        outputStreams = Collections.unmodifiableList(outputStreamStats.stream()
+            .map((ss) -> new OutputStreamEngine(ss)).collect(Collectors.toList()));
+        this.collector = collector;
+        executorIndex = context.getThisTaskIndex();
+        sleep.prepare();
+    }
+
+    private void emitTuples(Tuple input) {
+        for (OutputStreamEngine se: outputStreams) {
+            // we may output many tuples for a given input tuple
+            while (se.shouldEmit() != null) {
+                collector.emit(se.streamName, input, new Values(se.nextKey(), "SOME-BOLT-VALUE"));
+            }
+        }
+    }
+
+    @Override
+    public void execute(final Tuple input) {
+        long startTimeNs = System.nanoTime();
+        InputStream in = inputStreams.get(input.getSourceGlobalStreamId());
+        sleep.simulateProcessAndExecTime(executorIndex, startTimeNs, in, () -> {
+            emitTuples(input);
+            collector.ack(input);
+        });
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        for (OutputStream s: outputStreamStats) {
+            declarer.declareStream(s.id, new Fields("key", "value"));
+        }
+    }
+}
diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadCompConf.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadCompConf.java
new file mode 100644
index 00000000000..724083854f2
--- /dev/null
+++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadCompConf.java
@@ -0,0 +1,259 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.loadgen;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import org.apache.storm.generated.GlobalStreamId;
+import org.apache.storm.utils.ObjectReader;
+
+/**
+ * Configuration for a simulated component (a spout or a bolt).
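+ *
+ * <p>A small builder sketch (hypothetical values):
+ * ```java
+ * LoadCompConf boltConf = new LoadCompConf.Builder()
+ *     .withId("bolt-1")
+ *     .withParallelism(4)
+ *     .withCpuLoad(50.0)
+ *     .withMemoryLoad(512.0)
+ *     .build();
+ * ```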
+ */
+public class LoadCompConf {
+    public final String id;
+    public final int parallelism;
+    public final List<OutputStream> streams;
+    public final double cpuLoad;
+    public final double memoryLoad;
+    public final SlowExecutorPattern slp;
+
+    /**
+     * Parse the LoadCompConf from a config Map.
+     * @param conf the map holding the config for a LoadCompConf.
+     * @return the parsed object.
+     */
+    public static LoadCompConf fromConf(Map<String, Object> conf) {
+        String id = (String) conf.get("id");
+        int parallelism = ObjectReader.getInt(conf.get("parallelism"), 1);
+        List<OutputStream> streams = new ArrayList<>();
+        List<Map<String, Object>> streamData = (List<Map<String, Object>>) conf.get("streams");
+        if (streamData != null) {
+            for (Map<String, Object> streamInfo: streamData) {
+                streams.add(OutputStream.fromConf(streamInfo));
+            }
+        }
+        double memoryMb = ObjectReader.getDouble(conf.get("memoryLoad"), 0.0);
+        double cpuPercent = ObjectReader.getDouble(conf.get("cpuLoad"), 0.0);
+
+        SlowExecutorPattern slp = null;
+        if (conf.containsKey("slowExecutorPattern")) {
+            slp = SlowExecutorPattern.fromConf((Map<String, Object>) conf.get("slowExecutorPattern"));
+        }
+        return new LoadCompConf(id, parallelism, streams, cpuPercent, memoryMb, slp);
+    }
+
+    /**
+     * Build a config map for this object.
+     * @return the config map.
+     */
+    public Map<String, Object> toConf() {
+        Map<String, Object> ret = new HashMap<>();
+        ret.put("id", id);
+        ret.put("parallelism", parallelism);
+        if (memoryLoad > 0) {
+            ret.put("memoryLoad", memoryLoad);
+        }
+        if (cpuLoad > 0) {
+            ret.put("cpuLoad", cpuLoad);
+        }
+
+        if (streams != null) {
+            List<Map<String, Object>> streamData = new ArrayList<>();
+            for (OutputStream out : streams) {
+                streamData.add(out.toConf());
+            }
+            ret.put("streams", streamData);
+        }
+        if (slp != null) {
+            ret.put("slowExecutorPattern", slp.toConf());
+        }
+        return ret;
+    }
+
+    /**
+     * Change the name of components and streams according to the parameters passed in.
+     * @param remappedComponents original component name to new component name.
+     * @param remappedStreams original stream id to new stream id.
+     * @return a copy of this with the values remapped.
+     */
+    public LoadCompConf remap(Map<String, String> remappedComponents, Map<GlobalStreamId, GlobalStreamId> remappedStreams) {
+        String remappedId = remappedComponents.get(id);
+        List<OutputStream> remappedOutStreams = (streams == null) ? null :
+            streams.stream()
+                .map((orig) -> orig.remap(id, remappedStreams))
+                .collect(Collectors.toList());
+
+        return new LoadCompConf(remappedId, parallelism, remappedOutStreams, cpuLoad, memoryLoad, slp);
+    }
+
+    /**
+     * Scale the parallelism of this component by v. The aggregate throughput will be the same.
+     * The parallelism will be rounded up to the next largest whole number. Parallelism will always be at least 1.
+     * @param v 1.0 is no change; 0.5 drops the parallelism by half.
+     * @return a copy of this with the parallelism adjusted.
+     */
+    public LoadCompConf scaleParallel(double v) {
+        return setParallel(Math.max(1, (int) Math.ceil(parallelism * v)));
+    }
+
+    /**
+     * Set the parallelism of this component, and adjust the throughput so in aggregate it stays the same.
+     * @param newParallelism the new parallelism to set.
+     * @return a copy of this with the adjustments made.
+     */
+    public LoadCompConf setParallel(int newParallelism) {
+        //We need to adjust the throughput accordingly (so that it stays the same in aggregate)
+        double throughputAdjustment = ((double) parallelism) / newParallelism;
+        return new LoadCompConf(id, newParallelism, streams, cpuLoad, memoryLoad, slp).scaleThroughput(throughputAdjustment);
+    }
+
+    /**
+     * Scale the throughput of this component.
+     * @param v 1.0 is unchanged; 0.5 cuts the throughput in half.
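+     *     (e.g. a stream with a mean rate of 100 tuples/s scaled by 0.5 emits about 50 tuples/s)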
+     * @return a copy of this with the adjustments made.
+     */
+    public LoadCompConf scaleThroughput(double v) {
+        if (streams != null) {
+            List<OutputStream> newStreams = streams.stream().map((s) -> s.scaleThroughput(v)).collect(Collectors.toList());
+            return new LoadCompConf(id, parallelism, newStreams, cpuLoad, memoryLoad, slp);
+        } else {
+            return this;
+        }
+    }
+
+    /**
+     * Override the SlowExecutorPattern with a new one.
+     * @param slp the new pattern or null if you don't want it to change
+     * @return a copy of this with the adjustments made.
+     */
+    public LoadCompConf overrideSlowExecutorPattern(SlowExecutorPattern slp) {
+        if (slp != null) {
+            return new LoadCompConf(id, parallelism, streams, cpuLoad, memoryLoad, slp);
+        } else {
+            return this;
+        }
+    }
+
+    /**
+     * Compute the total amount of all messages emitted in all streams per second.
+     * @return the sum of all messages emitted per second.
+     */
+    public double getAllEmittedAggregate() {
+        double ret = 0;
+        if (streams != null) {
+            for (OutputStream out: streams) {
+                if (out.rate != null) {
+                    ret += out.rate.mean * parallelism;
+                }
+            }
+        }
+        return ret;
+    }
+
+    public static class Builder {
+        private String id;
+        private int parallelism = 1;
+        private List<OutputStream> streams;
+        private double cpuLoad = 0.0;
+        private double memoryLoad = 0.0;
+        private SlowExecutorPattern slp = null;
+
+        public String getId() {
+            return id;
+        }
+
+        public Builder withId(String id) {
+            this.id = id;
+            return this;
+        }
+
+        public int getParallelism() {
+            return parallelism;
+        }
+
+        public Builder withParallelism(int parallelism) {
+            this.parallelism = parallelism;
+            return this;
+        }
+
+        public List<OutputStream> getStreams() {
+            return streams;
+        }
+
+        /**
+         * Add in a single OutputStream to this component.
+         * @param stream the stream to add
+         * @return this
+         */
+        public Builder withStream(OutputStream stream) {
+            if (streams == null) {
+                streams = new ArrayList<>();
+            }
+            streams.add(stream);
+            return this;
+        }
+
+        public Builder withStreams(List<OutputStream> streams) {
+            this.streams = streams;
+            return this;
+        }
+
+        public Builder withCpuLoad(double cpuLoad) {
+            this.cpuLoad = cpuLoad;
+            return this;
+        }
+
+        public Builder withMemoryLoad(double memoryLoad) {
+            this.memoryLoad = memoryLoad;
+            return this;
+        }
+
+        public Builder withSlowExecutorPattern(SlowExecutorPattern slp) {
+            this.slp = slp;
+            return this;
+        }
+
+        public LoadCompConf build() {
+            return new LoadCompConf(id, parallelism, streams, cpuLoad, memoryLoad, slp);
+        }
+    }
+
+    /**
+     * Create a new LoadCompConf with the given values.
+     * @param id the id of the component.
+     * @param parallelism the parallelism of the component.
+     * @param streams the output streams of the component.
+     * @param cpuLoad the CPU load of the component.
+     * @param memoryLoad the memory load of the component.
+     * @param slp the slow executor pattern of the component, or null.
+     */
+    public LoadCompConf(String id, int parallelism, List<OutputStream> streams, double cpuLoad, double memoryLoad,
+                        SlowExecutorPattern slp) {
+        this.id = id;
+        if (id == null) {
+            throw new IllegalArgumentException("A component ID cannot be null");
+        }
+        this.parallelism = parallelism;
+        this.streams = streams;
+        this.cpuLoad = cpuLoad;
+        this.memoryLoad = memoryLoad;
+        this.slp = slp;
+    }
+}
diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadMetricsServer.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadMetricsServer.java
new file mode 100644
index 00000000000..2fb69a21bcd
--- /dev/null
+++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadMetricsServer.java
@@ -0,0 +1,1058 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.loadgen; + +import com.google.common.annotations.VisibleForTesting; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.FilterOutputStream; +import java.io.OutputStream; +import java.io.PrintStream; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiFunction; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.HdrHistogram.Histogram; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.Option; +import org.apache.commons.cli.Options; +import org.apache.commons.io.output.TeeOutputStream; +import org.apache.storm.generated.ClusterSummary; +import org.apache.storm.generated.ExecutorSummary; +import org.apache.storm.generated.Nimbus; +import org.apache.storm.generated.SpoutStats; +import org.apache.storm.generated.TopologyInfo; +import org.apache.storm.generated.TopologyPageInfo; +import org.apache.storm.generated.TopologySummary; +import org.apache.storm.metric.api.IMetricsConsumer; +import org.apache.storm.utils.Utils; +import org.apache.storm.utils.VersionInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A metrics server that records and reports metrics for a set of running topologies. + */ +public class LoadMetricsServer extends HttpForwardingMetricsServer { + private static final Logger LOG = LoggerFactory.getLogger(LoadMetricsServer.class); + + private static class MemMeasure { + private long mem = 0; + private long time = 0; + + synchronized void update(long mem) { + this.mem = mem; + time = System.currentTimeMillis(); + } + + public synchronized long get() { + return isExpired() ? 
0L : mem; + } + + synchronized boolean isExpired() { + return (System.currentTimeMillis() - time) >= 20000; + } + } + + @VisibleForTesting + static double convert(double value, TimeUnit from, TimeUnit target) { + if (target.compareTo(from) > 0) { + return value / from.convert(1, target); + } + return value * target.convert(1, from); + } + + public static class Measurements { + private final Histogram histo; + private double uiCompleteLatency; + private long skippedMaxSpoutMs; + private double userMs; + private double sysMs; + private double gcMs; + private long memBytes; + private long uptimeSecs; + private long timeWindow; + private long acked; + private long failed; + private Set topologyIds; + private long workers; + private long executors; + private long hosts; + private Map congested; + + /** + * Constructor. + * @param histo latency histogram. + * @param userMs user CPU in ms. + * @param sysMs system CPU in ms. + * @param gcMs GC CPU in ms. + */ + public Measurements(long uptimeSecs, long acked, long timeWindow, long failed, Histogram histo, + double userMs, double sysMs, double gcMs, long memBytes, Set topologyIds, + long workers, long executors, long hosts, Map congested, long skippedMaxSpoutMs, + double uiCompleteLatency) { + this.uptimeSecs = uptimeSecs; + this.acked = acked; + this.timeWindow = timeWindow; + this.failed = failed; + this.userMs = userMs; + this.sysMs = sysMs; + this.gcMs = gcMs; + this.histo = histo; + this.memBytes = memBytes; + this.topologyIds = topologyIds; + this.workers = workers; + this.executors = executors; + this.hosts = hosts; + this.congested = congested; + this.skippedMaxSpoutMs = skippedMaxSpoutMs; + this.uiCompleteLatency = uiCompleteLatency; + } + + /** + * Default Constructor. + */ + public Measurements() { + histo = new Histogram(3600000000000L, 3); + sysMs = 0; + userMs = 0; + gcMs = 0; + memBytes = 0; + uptimeSecs = 0; + timeWindow = 0; + acked = 0; + failed = 0; + topologyIds = new HashSet<>(); + workers = 0; + executors = 0; + hosts = 0; + congested = new HashMap<>(); + skippedMaxSpoutMs = 0; + uiCompleteLatency = 0.0; + } + + /** + * Add other to this. + * @param other meaurements to add in. 
+         */
+        public void add(Measurements other) {
+            histo.add(other.histo);
+            sysMs += other.sysMs;
+            userMs += other.userMs;
+            gcMs += other.gcMs;
+            memBytes = Math.max(memBytes, other.memBytes);
+            acked += other.acked;
+            failed += other.failed;
+            uptimeSecs = Math.max(uptimeSecs, other.uptimeSecs);
+            timeWindow += other.timeWindow;
+            topologyIds.addAll(other.topologyIds);
+            workers = Math.max(workers, other.workers);
+            executors = Math.max(executors, other.executors);
+            hosts = Math.max(hosts, other.hosts);
+            congested.putAll(other.congested);
+            skippedMaxSpoutMs += other.skippedMaxSpoutMs;
+            uiCompleteLatency = Math.max(uiCompleteLatency, other.uiCompleteLatency);
+        }
+
+        public double getLatencyAtPercentile(double percentile, TimeUnit unit) {
+            return convert(histo.getValueAtPercentile(percentile), TimeUnit.NANOSECONDS, unit);
+        }
+
+        public double getMinLatency(TimeUnit unit) {
+            return convert(histo.getMinValue(), TimeUnit.NANOSECONDS, unit);
+        }
+
+        public double getMaxLatency(TimeUnit unit) {
+            return convert(histo.getMaxValue(), TimeUnit.NANOSECONDS, unit);
+        }
+
+        public double getMeanLatency(TimeUnit unit) {
+            return convert(histo.getMean(), TimeUnit.NANOSECONDS, unit);
+        }
+
+        public double getLatencyStdDeviation(TimeUnit unit) {
+            return convert(histo.getStdDeviation(), TimeUnit.NANOSECONDS, unit);
+        }
+
+        public double getUiCompleteLatency(TimeUnit unit) {
+            return convert(uiCompleteLatency, TimeUnit.MILLISECONDS, unit);
+        }
+
+        public double getUserTime(TimeUnit unit) {
+            return convert(userMs, TimeUnit.MILLISECONDS, unit);
+        }
+
+        public double getSysTime(TimeUnit unit) {
+            return convert(sysMs, TimeUnit.MILLISECONDS, unit);
+        }
+
+        public double getGc(TimeUnit unit) {
+            return convert(gcMs, TimeUnit.MILLISECONDS, unit);
+        }
+
+        public double getSkippedMaxSpout(TimeUnit unit) {
+            return convert(skippedMaxSpoutMs, TimeUnit.MILLISECONDS, unit);
+        }
+
+        public double getMemMb() {
+            return memBytes / (1024.0 * 1024.0);
+        }
+
+        public long getUptimeSecs() {
+            return uptimeSecs;
+        }
+
+        public long getCompleted() {
+            return histo.getTotalCount();
+        }
+
+        public double getCompletedPerSec() {
+            return getCompleted() / (double) timeWindow;
+        }
+
+        public long getAcked() {
+            return acked;
+        }
+
+        public double getAckedPerSec() {
+            return acked / (double) timeWindow;
+        }
+
+        public long getFailed() {
+            return failed;
+        }
+
+        public long startTime() {
+            return uptimeSecs - timeWindow;
+        }
+
+        public long endTime() {
+            return uptimeSecs;
+        }
+
+        public double getTimeWindow() {
+            return timeWindow;
+        }
+
+        public Set<String> getTopologyIds() {
+            return topologyIds;
+        }
+
+        public long getWorkers() {
+            return workers;
+        }
+
+        public long getHosts() {
+            return hosts;
+        }
+
+        public long getExecutors() {
+            return executors;
+        }
+
+        public Map<String, String> getCongested() {
+            return congested;
+        }
+
+        static Measurements combine(List<Measurements> measurements, Integer start, Integer count) {
+            if (count == null) {
+                count = measurements.size();
+            }
+
+            if (start == null) {
+                start = measurements.size() - count;
+            }
+            start = Math.max(0, start);
+            count = Math.min(count, measurements.size() - start);
+
+            Measurements ret = new Measurements();
+            for (int i = start; i < start + count; i++) {
+                ret.add(measurements.get(i));
+            }
+            return ret;
+        }
+    }
+
+    interface MetricResultsReporter {
+        void start();
+
+        void reportWindow(Measurements inWindow, List<Measurements> allTime);
+
+        void finish(List<Measurements> allTime) throws Exception;
+    }
+
+    private static class NoCloseOutputStream extends FilterOutputStream {
+        NoCloseOutputStream(OutputStream out) {
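+            //delegate everything; close() below is a no-op so wrapping System.out/err never closes them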
+            super(out);
+        }
+
+        @Override
+        public void close() {
+            //NOOP on purpose
+        }
+    }
+
+    abstract static class FileReporter implements MetricResultsReporter {
+        protected final PrintStream out;
+        protected final Map<String, MetricExtractor> allExtractors;
+        public final boolean includesSysOutOrError;
+
+        FileReporter(Map<String, MetricExtractor> allExtractors) throws FileNotFoundException {
+            this(null, Collections.emptyMap(), allExtractors);
+        }
+
+        FileReporter(String path, Map<String, String> query, Map<String, MetricExtractor> allExtractors)
+            throws FileNotFoundException {
+            boolean append = Boolean.parseBoolean(query.getOrDefault("append", "false"));
+            boolean tee = Boolean.parseBoolean(query.getOrDefault("tee", "false"));
+            boolean includesSysOutOrError = false;
+
+            OutputStream out = null;
+            if (path == null || "/dev/stdout".equals(path)) {
+                out = new NoCloseOutputStream(System.out);
+                includesSysOutOrError = true;
+                tee = false;
+            } else if ("/dev/stderr".equals(path)) {
+                out = new NoCloseOutputStream(System.err);
+                includesSysOutOrError = true;
+                tee = false;
+            } else {
+                out = new FileOutputStream(path, append);
+            }
+
+            if (tee) {
+                out = new TeeOutputStream(new NoCloseOutputStream(System.out), out);
+                includesSysOutOrError = true;
+            }
+            this.out = new PrintStream(out);
+            //Copy it in case we want to modify it
+            this.allExtractors = new LinkedHashMap<>(allExtractors);
+            this.includesSysOutOrError = includesSysOutOrError;
+        }
+
+        @Override
+        public void start() {
+            //NOOP
+        }
+
+        @Override
+        public void finish(List<Measurements> allTime) throws Exception {
+            if (out != null) {
+                out.close();
+            }
+        }
+    }
+
+    private static final Map<String, TimeUnit> UNIT_MAP;
+
+    static {
+        HashMap<String, TimeUnit> tmp = new HashMap<>();
+        tmp.put("NS", TimeUnit.NANOSECONDS);
+        tmp.put("NANO", TimeUnit.NANOSECONDS);
+        tmp.put("NANOSEC", TimeUnit.NANOSECONDS);
+        tmp.put("NANOSECOND", TimeUnit.NANOSECONDS);
+        tmp.put("NANOSECONDS", TimeUnit.NANOSECONDS);
+        tmp.put("μS", TimeUnit.MICROSECONDS);
+        tmp.put("US", TimeUnit.MICROSECONDS);
+        tmp.put("MICRO", TimeUnit.MICROSECONDS);
+        tmp.put("MICROSEC", TimeUnit.MICROSECONDS);
+        tmp.put("MICROSECOND", TimeUnit.MICROSECONDS);
+        tmp.put("MICROSECONDS", TimeUnit.MICROSECONDS);
+        tmp.put("MS", TimeUnit.MILLISECONDS);
+        tmp.put("MILLI", TimeUnit.MILLISECONDS);
+        tmp.put("MILLISEC", TimeUnit.MILLISECONDS);
+        tmp.put("MILLISECOND", TimeUnit.MILLISECONDS);
+        tmp.put("MILLISECONDS", TimeUnit.MILLISECONDS);
+        tmp.put("S", TimeUnit.SECONDS);
+        tmp.put("SEC", TimeUnit.SECONDS);
+        tmp.put("SECOND", TimeUnit.SECONDS);
+        tmp.put("SECONDS", TimeUnit.SECONDS);
+        tmp.put("M", TimeUnit.MINUTES);
+        tmp.put("MIN", TimeUnit.MINUTES);
+        tmp.put("MINUTE", TimeUnit.MINUTES);
+        tmp.put("MINUTES", TimeUnit.MINUTES);
+        UNIT_MAP = Collections.unmodifiableMap(tmp);
+    }
+
+    private static final Map<TimeUnit, String> TIME_UNIT_NAME;
+
+    static {
+        HashMap<TimeUnit, String> tmp = new HashMap<>();
+        tmp.put(TimeUnit.NANOSECONDS, "ns");
+        tmp.put(TimeUnit.MICROSECONDS, "μs");
+        tmp.put(TimeUnit.MILLISECONDS, "ms");
+        tmp.put(TimeUnit.SECONDS, "s");
+        tmp.put(TimeUnit.MINUTES, "m");
+        TIME_UNIT_NAME = Collections.unmodifiableMap(tmp);
+    }
+
+    private static final Map<String, MetricExtractor> NAMED_EXTRACTORS;
+
+    static {
+        //Perhaps there is a better way to do this???
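+        //Each entry maps a report column name to a function over a Measurements window,
+        //e.g. "rate" resolves to completed tuples per second for that window.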
+ LinkedHashMap tmp = new LinkedHashMap<>(); + tmp.put("start_time", new MetricExtractor((m, unit) -> m.startTime(), "s")); + tmp.put("end_time", new MetricExtractor((m, unit) -> m.endTime(), "s")); + tmp.put("rate", new MetricExtractor((m, unit) -> m.getCompletedPerSec(), "tuple/s")); + tmp.put("mean", new MetricExtractor((m, unit) -> m.getMeanLatency(unit))); + tmp.put("99%ile", new MetricExtractor((m, unit) -> m.getLatencyAtPercentile(99.0, unit))); + tmp.put("99.9%ile", new MetricExtractor((m, unit) -> m.getLatencyAtPercentile(99.9, unit))); + tmp.put("cores", new MetricExtractor( + (m, unit) -> (m.getSysTime(TimeUnit.SECONDS) + m.getUserTime(TimeUnit.SECONDS)) / m.getTimeWindow(), + "")); + tmp.put("mem", new MetricExtractor((m, unit) -> m.getMemMb(), "MB")); + tmp.put("failed", new MetricExtractor((m, unit) -> m.getFailed(), "")); + tmp.put("median", new MetricExtractor((m, unit) -> m.getLatencyAtPercentile(50, unit))); + tmp.put("min", new MetricExtractor((m, unit) -> m.getMinLatency(unit))); + tmp.put("max", new MetricExtractor((m, unit) -> m.getMaxLatency(unit))); + tmp.put("stddev", new MetricExtractor((m, unit) -> m.getLatencyStdDeviation(unit))); + tmp.put("user_cpu", new MetricExtractor((m, unit) -> m.getUserTime(unit))); + tmp.put("sys_cpu", new MetricExtractor((m, unit) -> m.getSysTime(unit))); + tmp.put("gc_cpu", new MetricExtractor((m, unit) -> m.getGc(unit))); + tmp.put("skipped_max_spout", new MetricExtractor((m, unit) -> m.getSkippedMaxSpout(unit))); + tmp.put("acked", new MetricExtractor((m, unit) -> m.getAcked(), "")); + tmp.put("acked_rate", new MetricExtractor((m, unit) -> m.getAckedPerSec(), "tuple/s")); + tmp.put("completed", new MetricExtractor((m, unit) -> m.getCompleted(), "")); + tmp.put("uptime", new MetricExtractor((m, unit) -> m.getUptimeSecs(), "s")); + tmp.put("time_window", new MetricExtractor((m, unit) -> m.getTimeWindow(), "s")); + tmp.put("ids", new MetricExtractor((m, unit) -> m.getTopologyIds(), "")); + tmp.put("congested", new MetricExtractor((m, unit) -> m.getCongested(), "")); + tmp.put("workers", new MetricExtractor((m, unit) -> m.getWorkers(), "")); + tmp.put("hosts", new MetricExtractor((m, unit) -> m.getHosts(), "")); + tmp.put("executors", new MetricExtractor((m, unit) -> m.getExecutors(), "")); + String buildVersion = VersionInfo.getBuildVersion(); + tmp.put("storm_version", new MetricExtractor((m, unit) -> buildVersion, "")); + tmp.put("java_version", new MetricExtractor((m, unit) -> System.getProperty("java.vendor") + + " " + System.getProperty("java.version"), "")); + tmp.put("os_arch", new MetricExtractor((m, unit) -> System.getProperty("os.arch"), "")); + tmp.put("os_name", new MetricExtractor((m, unit) -> System.getProperty("os.name"), "")); + tmp.put("os_version", new MetricExtractor((m, unit) -> System.getProperty("os.version"), "")); + tmp.put("config_override", new MetricExtractor((m, unit) -> Utils.readCommandLineOpts(), "")); + tmp.put("ui_complete_latency", new MetricExtractor((m, unit) -> m.getUiCompleteLatency(unit))); + NAMED_EXTRACTORS = Collections.unmodifiableMap(tmp); + } + + static class MetricExtractor { + private final String unit; + private final BiFunction func; + + MetricExtractor(BiFunction func) { + this.func = func; + this.unit = null; + } + + MetricExtractor(BiFunction func, String unit) { + this.func = func; + this.unit = unit; + } + + public Object get(Measurements m, TimeUnit unit) { + return func.apply(m, unit); + } + + public String formatName(String name, TimeUnit targetUnit) { + StringBuilder ret = new 
StringBuilder(); + ret.append(name); + if (unit == null || !unit.isEmpty()) { + ret.append("("); + if (unit == null) { + ret.append(TIME_UNIT_NAME.get(targetUnit)); + } else { + ret.append(unit); + } + ret.append(")"); + } + return ret.toString(); + } + } + + abstract static class ColumnsFileReporter extends FileReporter { + protected final TimeUnit targetUnit; + protected final List extractors; + protected final String meta; + protected final int precision; + protected String doubleFormat; + + ColumnsFileReporter(String path, Map query, Map extractorsMap) + throws FileNotFoundException { + this(path, query, extractorsMap, null); + } + + ColumnsFileReporter(String path, Map query, Map extractorsMap, + String defaultPreceision) throws FileNotFoundException { + super(path, query, extractorsMap); + targetUnit = UNIT_MAP.get(query.getOrDefault("time", "MILLISECONDS").toUpperCase()); + if (targetUnit == null) { + throw new IllegalArgumentException(query.get("time") + " is not a supported time unit"); + } + if (query.containsKey("columns")) { + List extractors = handleExtractorCleanup(Arrays.asList(query.get("columns").split("\\s*,\\s*"))); + + HashSet notFound = new HashSet<>(extractors); + notFound.removeAll(allExtractors.keySet()); + if (notFound.size() > 0) { + throw new IllegalArgumentException(notFound + " columns are not supported"); + } + this.extractors = extractors; + } else { + //Wrapping it makes it mutable + extractors = new ArrayList<>(Arrays.asList("start_time", "end_time", "rate", + "mean", "99%ile", "99.9%ile", "cores", "mem", "failed", "ids", "congested")); + } + + if (query.containsKey("extraColumns")) { + List moreExtractors = + handleExtractorCleanup(Arrays.asList(query.get("extraColumns").split("\\s*,\\s*"))); + for (String extractor: moreExtractors) { + if (!allExtractors.containsKey(extractor)) { + throw new IllegalArgumentException(extractor + " is not a supported column"); + } + if (!extractors.contains(extractor)) { + extractors.add(extractor); + } + } + } + String strPrecision = query.getOrDefault("precision", defaultPreceision); + if (strPrecision == null) { + precision = -1; + doubleFormat = "%f"; + } else { + precision = Integer.parseInt(strPrecision); + doubleFormat = "%." + precision + "f"; + } + meta = query.get("meta"); + } + + protected List handleExtractorCleanup(List orig) { + Map stormConfig = Utils.readStormConfig(); + List ret = new ArrayList<>(orig.size()); + for (String extractor: orig) { + if (extractor.startsWith("conf:")) { + String confKey = extractor.substring("conf:".length()); + Object confValue = stormConfig.get(confKey); + allExtractors.put(extractor, new MetricExtractor((m, t) -> confValue, "")); + ret.add(extractor); + } else if (extractor.endsWith("%ile")) { + double number = Double.valueOf(extractor.substring(0, extractor.length() - "%ile".length())); + allExtractors.put(extractor, new MetricExtractor((m, t) -> m.getLatencyAtPercentile(number, t))); + ret.add(extractor); + } else if ("*".equals(extractor)) { + ret.addAll(allExtractors.keySet()); + } else { + ret.add(extractor); + } + } + return ret; + } + + protected String format(Object o) { + if (o instanceof Double || o instanceof Float) { + return String.format(doubleFormat, o); + } else { + return o == null ? 
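+                // a null value (for example an unset "conf:" key) renders as an empty cell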
"" : o.toString(); + } + } + } + + + static class FixedWidthReporter extends ColumnsFileReporter { + public final String longFormat; + public final String stringFormat; + + FixedWidthReporter(String path, Map query, Map extractorsMap) + throws FileNotFoundException { + super(path, query, extractorsMap, "3"); + int columnWidth = Integer.parseInt(query.getOrDefault("columnWidth", "15")) - 1; //Always have a space in between + doubleFormat = "%," + columnWidth + "." + precision + "f"; + longFormat = "%," + columnWidth + "d"; + stringFormat = "%" + columnWidth + "s"; + } + + FixedWidthReporter(Map allExtractors) throws FileNotFoundException { + this(null, Collections.emptyMap(), allExtractors); + } + + @Override + protected String format(Object o) { + if (o instanceof Double || o instanceof Float) { + return String.format(doubleFormat, o); + } else if (o instanceof Integer || o instanceof Long) { + return String.format(longFormat, o); + } else { + return String.format(stringFormat, o); + } + } + + @Override + public void start() { + boolean first = true; + for (String name: extractors) { + if (!first) { + out.print(" "); + } + first = false; + out.print(format(allExtractors.get(name).formatName(name, targetUnit))); + } + if (meta != null) { + out.print(" "); + out.print(format("meta")); + } + out.println(); + } + + @Override + public void reportWindow(Measurements m, List allTime) { + boolean first = true; + for (String name: extractors) { + if (!first) { + out.print(" "); + } + first = false; + out.print(format(allExtractors.get(name).get(m, targetUnit))); + } + if (meta != null) { + out.print(" "); + out.print(format(meta)); + } + out.println(); + } + } + + static class SepValReporter extends ColumnsFileReporter { + private final String separator; + + SepValReporter(String separator, String path, Map query, Map extractorsMap) + throws FileNotFoundException { + super(path, query, extractorsMap); + this.separator = separator; + } + + @Override + public void start() { + boolean first = true; + for (String name: extractors) { + if (!first) { + out.print(separator); + } + first = false; + out.print(allExtractors.get(name).formatName(name, targetUnit)); + } + if (meta != null) { + out.print(separator); + out.print("meta"); + } + out.println(); + } + + @Override + public void reportWindow(Measurements m, List allTime) { + boolean first = true; + for (String name: extractors) { + if (!first) { + out.print(separator); + } + first = false; + Object value = allExtractors.get(name).get(m, targetUnit); + String svalue = format(value); + out.print(escape(svalue)); + } + if (meta != null) { + out.print(separator); + out.print(escape(meta)); + } + out.println(); + } + + private String escape(String svalue) { + return svalue.replace("\\", "\\\\").replace(separator, "\\" + separator); + } + } + + static class LegacyReporter extends FileReporter { + private final TimeUnit targetUnitOverride; + + LegacyReporter(Map allExtractors) throws FileNotFoundException { + super(allExtractors); + targetUnitOverride = null; + } + + LegacyReporter(String path, Map query, Map allExtractors) + throws FileNotFoundException { + super(path, query, allExtractors); + if (query.containsKey("time")) { + targetUnitOverride = UNIT_MAP.get(query.get("time").toUpperCase()); + if (targetUnitOverride == null) { + throw new IllegalArgumentException(query.get("time") + " is not a supported time unit"); + } + } else { + targetUnitOverride = null; + } + } + + @Override + public void reportWindow(Measurements m, List allTime) { + TimeUnit nsOr 
= TimeUnit.NANOSECONDS; + TimeUnit msOr = TimeUnit.MILLISECONDS; + if (targetUnitOverride != null) { + nsOr = targetUnitOverride; + msOr = targetUnitOverride; + } + + Measurements total = Measurements.combine(allTime, null, null); + out.printf("uptime: %,4d acked: %,9d acked/sec: %,10.2f failed: %,8d " + + "99%%: %,15.0f 99.9%%: %,15.0f min: %,15.0f max: %,15.0f mean: %,15.2f " + + "stddev: %,15.2f user: %,10.0f sys: %,10.0f gc: %,10.0f mem: %,10.2f\n", + m.getUptimeSecs(), m.getAcked(), m.getAckedPerSec(), total.getFailed(), + m.getLatencyAtPercentile(99.0, nsOr), + m.getLatencyAtPercentile(99.9, nsOr), + m.getMinLatency(nsOr), + m.getMaxLatency(nsOr), + m.getMeanLatency(nsOr), + m.getLatencyStdDeviation(nsOr), + m.getUserTime(msOr), + m.getSysTime(msOr), + m.getGc(msOr), + m.getMemMb()); + } + } + + /** + * Add Command line options for configuring the output of this. + * @param options command line options to update + */ + public static void addCommandLineOptions(Options options) { + //We want to be able to select the measurement interval + // reporting window (We don't need 3 different reports) + // We want to be able to specify format (and configs specific to the format) + // With perhaps defaults overall + options.addOption(Option.builder("r") + .longOpt("report-interval") + .hasArg() + .argName("SECS") + .desc("How long in between reported metrics. Will be rounded up to the next 10 sec boundary.\n" + + "default " + DEFAULT_REPORT_INTERVAL) + .build()); + + options.addOption(Option.builder("w") + .longOpt("report-window") + .hasArg() + .argName("SECS") + .desc("How long of a rolling window should be in each report. Will be rounded up to the next report interval boundary.\n" + + "default " + DEFAULT_WINDOW_INTERVAL) + .build()); + + options.addOption(Option.builder() + .longOpt("reporter") + .hasArg() + .argName("TYPE:PATH?OPTIONS") + .desc("Provide the config for a reporter to run. Supported types are:\n" + + "FIXED - a fixed width format that should be more human readable\n" + + "LEGACY - (write things out in the legacy format)\n" + + "TSV - tab separated values\n" + + "CSV - comma separated values\n" + + "PATH and OPTIONS are each optional but must be marked with a ':' or '?' 
separator respectively.")
+            .build());
+
+    }
+
+    public static final long DEFAULT_REPORT_INTERVAL = 30;
+    public static final long DEFAULT_WINDOW_INTERVAL = DEFAULT_REPORT_INTERVAL;
+    private static final Pattern REPORTER_PATTERN = Pattern.compile(
+        "(?<type>[^:?]+)(?::(?<path>[^?]+))?(?:\\?(?<query>.*))?");
+
+    private final Histogram histo = new Histogram(3600000000000L, 3);
+    private final AtomicLong systemCpu = new AtomicLong(0);
+    private final AtomicLong userCpu = new AtomicLong(0);
+    private final AtomicLong gcCount = new AtomicLong(0);
+    private final AtomicLong gcMs = new AtomicLong(0);
+    private final AtomicLong skippedMaxSpoutMs = new AtomicLong(0);
+    private final ConcurrentHashMap<String, MemMeasure> memoryBytes = new ConcurrentHashMap<>();
+    private final AtomicReference<ConcurrentHashMap<String, String>> congested = new AtomicReference<>(new ConcurrentHashMap<>());
+    private final List<MetricResultsReporter> reporters;
+    private long prevAcked = 0;
+    private long prevFailed = 0;
+    private long prevUptime = 0;
+    private int windowLength = 1;
+    private long reportIntervalSecs = DEFAULT_REPORT_INTERVAL;
+
+    private final LinkedList<Measurements> allCombined = new LinkedList<>();
+
+    LoadMetricsServer(Map<String, Object> conf, CommandLine commandLine, Map<String, Object> parameterMetrics) throws URISyntaxException,
+        FileNotFoundException {
+        super(conf);
+        Map<String, MetricExtractor> allExtractors = new LinkedHashMap<>(NAMED_EXTRACTORS);
+        for (Map.Entry<String, Object> entry: parameterMetrics.entrySet()) {
+            final Object value = entry.getValue();
+            allExtractors.put(entry.getKey(), new MetricExtractor((m, unit) -> value, ""));
+        }
+        if (commandLine.hasOption("r")) {
+            reportIntervalSecs = Long.parseLong(commandLine.getOptionValue("r"));
+            reportIntervalSecs = ((reportIntervalSecs + 1) / 10) * 10;
+        }
+        if (commandLine.hasOption("w")) {
+            long window = Long.parseLong(commandLine.getOptionValue("w"));
+            windowLength = (int) ((window + 1) / reportIntervalSecs);
+        }
+        reporters = new ArrayList<>();
+        if (commandLine.hasOption("reporter")) {
+            for (String reporterString: commandLine.getOptionValues("reporter")) {
+                Matcher m = REPORTER_PATTERN.matcher(reporterString);
+                if (!m.matches()) {
+                    throw new IllegalArgumentException(reporterString + " does not look like it is a reporter");
+                }
+                String type = m.group("type");
+                String path = m.group("path");
+                Map<String, String> query = new HashMap<>();
+                String queryString = m.group("query");
+                if (queryString != null) {
+                    for (String param : queryString.split("&")) {
+                        String[] pair = param.split("=");
+                        String key = pair[0];
+                        String value = pair.length > 1 ?
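+                        // a key given without '=' (for example "?tee") is treated as boolean true: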
pair[1] : "true"; + query.put(key, value); + } + } + type = type.toUpperCase(); + switch (type) { + case "FIXED": + reporters.add(new FixedWidthReporter(path, query, allExtractors)); + break; + case "LEGACY": + reporters.add(new LegacyReporter(path, query, allExtractors)); + break; + case "TSV": + reporters.add(new SepValReporter("\t", path, query, allExtractors)); + break; + case "CSV": + reporters.add(new SepValReporter(",", path, query, allExtractors)); + break; + default: + throw new RuntimeException(type + " is not a supported reporter type"); + } + } + } + boolean foundStdOutOrErr = false; + for (MetricResultsReporter rep : reporters) { + if (rep instanceof FileReporter) { + foundStdOutOrErr = ((FileReporter) rep).includesSysOutOrError; + if (foundStdOutOrErr) { + break; + } + } + } + if (!foundStdOutOrErr) { + reporters.add(new FixedWidthReporter(allExtractors)); + } + } + + private long readMemory() { + long total = 0; + for (MemMeasure mem: memoryBytes.values()) { + total += mem.get(); + } + return total; + } + + private void startMetricsOutput() { + for (MetricResultsReporter reporter: reporters) { + reporter.start(); + } + } + + private void finishMetricsOutput() throws Exception { + for (MetricResultsReporter reporter: reporters) { + reporter.finish(allCombined); + } + } + + /** + * Monitor the list of topologies for the given time frame. + * @param execTimeMins how long to monitor for + * @param client the client to use when monitoring + * @param topoNames the names of the topologies to monitor + * @throws Exception on any error + */ + public void monitorFor(double execTimeMins, Nimbus.Iface client, Collection topoNames) throws Exception { + startMetricsOutput(); + long iterations = (long) ((execTimeMins * 60) / reportIntervalSecs); + for (int i = 0; i < iterations; i++) { + Thread.sleep(reportIntervalSecs * 1000); + outputMetrics(client, topoNames); + } + finishMetricsOutput(); + } + + private void outputMetrics(Nimbus.Iface client, Collection names) throws Exception { + Set ids = new HashSet<>(); + HashSet workers = new HashSet<>(); + HashSet hosts = new HashSet<>(); + int executors = 0; + int uptime = 0; + long acked = 0; + long failed = 0; + double totalLatMs = 0; + long totalLatCount = 0; + for (String name: names) { + TopologyInfo info = client.getTopologyInfoByName(name); + ids.add(info.get_id()); + @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance") + TopologyPageInfo tpi = client.getTopologyPageInfo(info.get_id(), ":all-time", false); + uptime = Math.max(uptime, info.get_uptime_secs()); + for (ExecutorSummary exec : info.get_executors()) { + hosts.add(exec.get_host()); + workers.add(exec.get_host() + exec.get_port()); + executors++; + if (exec.get_stats() != null && exec.get_stats().get_specific() != null + && exec.get_stats().get_specific().is_set_spout()) { + SpoutStats stats = exec.get_stats().get_specific().get_spout(); + Map failedMap = stats.get_failed().get(":all-time"); + Map ackedMap = stats.get_acked().get(":all-time"); + if (ackedMap != null) { + for (String key : ackedMap.keySet()) { + if (failedMap != null) { + Long tmp = failedMap.get(key); + if (tmp != null) { + failed += tmp; + } + } + long ackVal = ackedMap.get(key); + acked += ackVal; + } + } + } + } + Double latency = tpi.get_topology_stats().get_window_to_complete_latencies_ms().get(":all-time"); + Long latAcked = tpi.get_topology_stats().get_window_to_acked().get(":all-time"); + if (latency != null && latAcked != null) { + totalLatCount += latAcked; + totalLatMs += (latAcked * 
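+                // weight each topology's average complete latency by its acked count so the
+                // later division by totalLatCount yields an acked-weighted mean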
latency); + } + } + @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance") + long failedThisTime = failed - prevFailed; + @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance") + long ackedThisTime = acked - prevAcked; + @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance") + long thisTime = uptime - prevUptime; + prevUptime = uptime; + prevAcked = acked; + prevFailed = failed; + + Histogram copy = new Histogram(3600000000000L, 3);; + synchronized (histo) { + copy.add(histo); + histo.reset(); + } + long user = userCpu.getAndSet(0); + long sys = systemCpu.getAndSet(0); + long gc = gcMs.getAndSet(0); + long skippedMaxSpout = skippedMaxSpoutMs.getAndSet(0); + long memBytes = readMemory(); + + allCombined.add(new Measurements(uptime, ackedThisTime, thisTime, failedThisTime, copy, user, sys, gc, memBytes, + ids, workers.size(), executors, hosts.size(), congested.getAndSet(new ConcurrentHashMap<>()), skippedMaxSpout, + totalLatMs / totalLatCount)); + Measurements inWindow = Measurements.combine(allCombined, null, windowLength); + for (MetricResultsReporter reporter: reporters) { + reporter.reportWindow(inWindow, allCombined); + } + } + + @Override + @SuppressWarnings("unchecked") + public void handle(IMetricsConsumer.TaskInfo taskInfo, Collection dataPoints, String topologyId) { + String worker = taskInfo.srcWorkerHost + ":" + taskInfo.srcWorkerPort; + for (IMetricsConsumer.DataPoint dp: dataPoints) { + if (dp.name.startsWith("comp-lat-histo") && dp.value instanceof Histogram) { + synchronized (histo) { + histo.add((Histogram) dp.value); + } + } else if ("CPU".equals(dp.name) && dp.value instanceof Map) { + Map m = (Map) dp.value; + Object sys = m.get("sys-ms"); + if (sys instanceof Number) { + systemCpu.getAndAdd(((Number) sys).longValue()); + } + Object user = m.get("user-ms"); + if (user instanceof Number) { + userCpu.getAndAdd(((Number) user).longValue()); + } + } else if (dp.name.startsWith("GC/") && dp.value instanceof Map) { + Map m = (Map) dp.value; + Object count = m.get("count"); + if (count instanceof Number) { + gcCount.getAndAdd(((Number) count).longValue()); + } + Object time = m.get("timeMs"); + if (time instanceof Number) { + gcMs.getAndAdd(((Number) time).longValue()); + } + } else if (dp.name.startsWith("memory/") && dp.value instanceof Map) { + Map m = (Map) dp.value; + Object val = m.get("usedBytes"); + if (val instanceof Number) { + MemMeasure mm = memoryBytes.get(worker); + if (mm == null) { + mm = new MemMeasure(); + MemMeasure tmp = memoryBytes.putIfAbsent(worker, mm); + mm = tmp == null ? 
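+                        // putIfAbsent returns the previously mapped value when another thread
+                        // won the race, so keep ours only if none existed: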
mm : tmp; + } + mm.update(((Number) val).longValue()); + } + } else if (dp.name.equals("__receive")) { + Map m = (Map) dp.value; + Object pop = m.get("population"); + Object cap = m.get("capacity"); + if (pop instanceof Number && cap instanceof Number) { + double full = ((Number) pop).doubleValue() / ((Number) cap).doubleValue(); + if (full >= 0.8) { + congested.get().put( + topologyId + ":" + taskInfo.srcComponentId + ":" + taskInfo.srcTaskId, + "receive " + pop + "/" + cap); + } + } + } else if (dp.name.equals("__skipped-max-spout-ms")) { + if (dp.value instanceof Number) { + skippedMaxSpoutMs.getAndAdd(((Number) dp.value).longValue()); + double full = ((Number) dp.value).doubleValue() / 10_000.0; //The frequency of reporting + if (full >= 0.8) { + congested.get().put( + topologyId + ":" + taskInfo.srcComponentId + ":" + taskInfo.srcTaskId, + "max.spout.pending " + (int) (full * 100) + "%"); + } + } + } + } + } +} diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadSpout.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadSpout.java new file mode 100644 index 00000000000..5d639d38467 --- /dev/null +++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadSpout.java @@ -0,0 +1,144 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.loadgen; + +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.stream.Collectors; +import org.apache.storm.metrics.hdrhistogram.HistogramMetric; +import org.apache.storm.spout.SpoutOutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichSpout; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Values; + +/** + * A spout that simulates a real world spout based off of statistics about it. + */ +public class LoadSpout extends BaseRichSpout { + private static class OutputStreamEngineWithHisto extends OutputStreamEngine { + public final HistogramMetric histogram; + + OutputStreamEngineWithHisto(OutputStream stats, TopologyContext context) { + super(stats); + histogram = new HistogramMetric(3600000000000L, 3); + //TODO perhaps we can adjust the frequency later... 
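+            // publish the per-stream latency histogram every 10 seconds; LoadMetricsServer
+            // picks it up in handle() by the "comp-lat-histo" name prefix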
+ context.registerMetric("comp-lat-histo-" + stats.id, histogram, 10); + } + } + + private static class SentWithTime { + public final String streamName; + public final Values keyValue; + public final long time; + public final HistogramMetric histogram; + + SentWithTime(String streamName, Values keyValue, long time, HistogramMetric histogram) { + this.streamName = streamName; + this.keyValue = keyValue; + this.time = time; + this.histogram = histogram; + } + + public void done() { + histogram.recordValue(Math.max(0, System.nanoTime() - time)); + } + } + + private final List streamStats; + private List streams; + private SpoutOutputCollector collector; + //This is an attempt to give all of the streams an equal opportunity to emit something. + private long nextStreamCounter = 0; + private final int numStreams; + private final Queue replays = new ArrayDeque<>(); + + /** + * Create a simple load spout with just a set rate per second on the default stream. + * @param ratePerSecond the rate to send messages at. + */ + public LoadSpout(double ratePerSecond) { + OutputStream test = new OutputStream.Builder() + .withId("default") + .withRate(new NormalDistStats(ratePerSecond, 0.0, ratePerSecond, ratePerSecond)) + .build(); + streamStats = Arrays.asList(test); + numStreams = 1; + } + + public LoadSpout(LoadCompConf conf) { + this.streamStats = Collections.unmodifiableList(new ArrayList<>(conf.streams)); + numStreams = streamStats.size(); + } + + @Override + public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) { + streams = Collections.unmodifiableList(streamStats.stream() + .map((ss) -> new OutputStreamEngineWithHisto(ss, context)).collect(Collectors.toList())); + this.collector = collector; + } + + @Override + public void nextTuple() { + if (!replays.isEmpty()) { + SentWithTime swt = replays.poll(); + collector.emit(swt.streamName, swt.keyValue, swt); + return; + } + int size = numStreams; + for (int tries = 0; tries < size; tries++) { + int index = Math.abs((int) (nextStreamCounter++ % size)); + OutputStreamEngineWithHisto se = streams.get(index); + Long emitTupleTime = se.shouldEmit(); + if (emitTupleTime != null) { + SentWithTime swt = + new SentWithTime(se.streamName, getNextValues(se), emitTupleTime, se.histogram); + collector.emit(swt.streamName, swt.keyValue, swt); + break; + } + } + } + + protected Values getNextValues(OutputStreamEngine se) { + return new Values(se.nextKey(), "JUST_SOME_VALUE"); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + for (OutputStream s: streamStats) { + declarer.declareStream(s.id, new Fields("key", "value")); + } + } + + @Override + public void ack(Object id) { + ((SentWithTime) id).done(); + } + + @Override + public void fail(Object id) { + replays.add((SentWithTime) id); + } +} diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/NormalDistStats.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/NormalDistStats.java new file mode 100644 index 00000000000..d7555a555b9 --- /dev/null +++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/NormalDistStats.java @@ -0,0 +1,151 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.loadgen; + +import java.io.Serializable; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import org.apache.storm.utils.ObjectReader; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Stats related to something with a normal distribution, and a way to randomly simulate it. + */ +public class NormalDistStats implements Serializable { + private static final Logger LOG = LoggerFactory.getLogger(NormalDistStats.class); + public final double mean; + public final double stddev; + public final double min; + public final double max; + + /** + * Read the stats from a config. + * @param conf the config. + * @return the corresponding stats. + */ + public static NormalDistStats fromConf(Map conf) { + return fromConf(conf, null); + } + + /** + * Read the stats from a config. + * @param conf the config. + * @param def the default mean. + * @return the corresponding stats. + */ + public static NormalDistStats fromConf(Map conf, Double def) { + if (conf == null) { + conf = Collections.emptyMap(); + } + double mean = ObjectReader.getDouble(conf.get("mean"), def); + double stddev = ObjectReader.getDouble(conf.get("stddev"), mean / 4); + double min = ObjectReader.getDouble(conf.get("min"), 0.0); + double max = ObjectReader.getDouble(conf.get("max"), Double.MAX_VALUE); + return new NormalDistStats(mean, stddev, min, max); + } + + /** + * Return this as a config. + * @return the config version of this. + */ + public Map toConf() { + Map ret = new HashMap<>(); + ret.put("mean", mean); + ret.put("stddev", stddev); + ret.put("min", min); + ret.put("max", max); + return ret; + } + + /** + * Create an instance of this from a list of values. The metrics will be computed from the values. + * @param values the values to compute metrics from. + */ + public NormalDistStats(List values) { + //Compute the stats for these and save them + double min = values.isEmpty() ? 0.0 : values.get(0); + double max = values.isEmpty() ? 0.0 : values.get(0); + double sum = 0.0; + long count = values.size(); + for (Double v: values) { + sum += v; + min = Math.min(min, v); + max = Math.max(max, v); + } + double mean = sum / Math.max(count, 1); + double sdPartial = 0; + for (Double v: values) { + sdPartial += Math.pow(v - mean, 2); + } + double stddev = 0.0; + if (count >= 2) { + stddev = Math.sqrt(sdPartial / (count - 1)); + } + this.min = min; + this.max = max; + this.mean = mean; + this.stddev = stddev; + LOG.debug("Stats for {} are {}", values, this); + } + + /** + * A Constructor for the pre computed stats. + * @param mean the mean of the values. + * @param stddev the standard deviation of the values. + * @param min the min of the values. + * @param max the max of the values. 
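+     * <p>For illustration, {@code new NormalDistStats(100.0, 10.0, 50.0, 150.0)} describes
+     * values averaging 100 that {@link #nextRandom} will clamp to the range [50, 150].</p>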
+ */ + public NormalDistStats(double mean, double stddev, double min, double max) { + this.mean = mean; + this.stddev = stddev; + this.min = min; + this.max = max; + } + + /** + * Generate a random number that follows the statistical distribution. + * @param rand the random number generator to use + * @return the next number that should follow the statistical distribution. + */ + public double nextRandom(Random rand) { + return Math.max(Math.min((rand.nextGaussian() * stddev) + mean, max), min); + } + + @Override + public String toString() { + return "mean: " + mean + " min: " + min + " max: " + max + " stddev: " + stddev; + } + + /** + * Scale the stats by v. This is not scaling everything proportionally. We don't want the stddev to increase + * so instead we scale the mean and shift everything up or down by the same amount. + * @param v the amount to scale by 1.0 is nothing 0.5 is half. + * @return a copy of this with the needed adjustments. + */ + public NormalDistStats scaleBy(double v) { + double newMean = mean * v; + double shiftAmount = newMean - mean; + return new NormalDistStats(Math.max(0, mean + shiftAmount), stddev, + Math.max(0, min + shiftAmount), Math.max(0, max + shiftAmount)); + } +} diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/OutputStream.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/OutputStream.java new file mode 100644 index 00000000000..33f894f1200 --- /dev/null +++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/OutputStream.java @@ -0,0 +1,119 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.loadgen; + +import java.io.Serializable; +import java.util.HashMap; +import java.util.Map; +import org.apache.storm.generated.GlobalStreamId; + +/** + * A set of measurements about a stream so we can statistically reproduce it. + */ +public class OutputStream implements Serializable { + //The global stream id is this + the from component it must be a part of. + public final String id; + public final NormalDistStats rate; + public final boolean areKeysSkewed; + + /** + * Create an output stream from a config. + * @param conf the config to read from. + * @return the read OutputStream. + */ + public static OutputStream fromConf(Map conf) { + String streamId = (String) conf.getOrDefault("streamId", "default"); + NormalDistStats rate = NormalDistStats.fromConf((Map) conf.get("rate")); + boolean areKeysSkewed = (Boolean) conf.getOrDefault("areKeysSkewed", false); + return new OutputStream(streamId, rate, areKeysSkewed); + } + + /** + * Convert this to a conf. + * @return the conf. 
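+     * <p>As an illustration, a stream with id "default" serializes to a map along the lines
+     * of {streamId: default, rate: {mean: ..., stddev: ..., min: ..., max: ...}, areKeysSkewed: false}.</p>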
+     */
+    public Map<String, Object> toConf() {
+        Map<String, Object> ret = new HashMap<>();
+        ret.put("streamId", id);
+        ret.put("rate", rate.toConf());
+        ret.put("areKeysSkewed", areKeysSkewed);
+        return ret;
+    }
+
+    public OutputStream remap(String origId, Map<GlobalStreamId, GlobalStreamId> remappedStreams) {
+        GlobalStreamId remapped = remappedStreams.get(new GlobalStreamId(origId, id));
+        return new OutputStream(remapped.get_streamId(), rate, areKeysSkewed);
+    }
+
+    public OutputStream scaleThroughput(double v) {
+        return new OutputStream(id, rate.scaleBy(v), areKeysSkewed);
+    }
+
+    public static class Builder {
+        private String id;
+        private NormalDistStats rate;
+        private boolean areKeysSkewed;
+
+        public String getId() {
+            return id;
+        }
+
+        public Builder withId(String id) {
+            this.id = id;
+            return this;
+        }
+
+        public NormalDistStats getRate() {
+            return rate;
+        }
+
+        public Builder withRate(NormalDistStats rate) {
+            this.rate = rate;
+            return this;
+        }
+
+        public boolean isAreKeysSkewed() {
+            return areKeysSkewed;
+        }
+
+        public Builder withAreKeysSkewed(boolean areKeysSkewed) {
+            this.areKeysSkewed = areKeysSkewed;
+            return this;
+        }
+
+        public OutputStream build() {
+            return new OutputStream(id, rate, areKeysSkewed);
+        }
+    }
+
+    /**
+     * Create a new stream with stats.
+     * @param id the id of the stream
+     * @param rate the rate of tuples being emitted on this stream
+     * @param areKeysSkewed true if keys are skewed else false. For skewed keys
+     *     we only simulate it by using a Gaussian distribution to the keys instead
+     *     of an even distribution. There is no effort made right now to measure the
+     *     skewness and reproduce it.
+     */
+    public OutputStream(String id, NormalDistStats rate, boolean areKeysSkewed) {
+        this.id = id;
+        this.rate = rate;
+        this.areKeysSkewed = areKeysSkewed;
+    }
+}
diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/OutputStreamEngine.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/OutputStreamEngine.java
new file mode 100644
index 00000000000..ae23679ee28
--- /dev/null
+++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/OutputStreamEngine.java
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.loadgen;
+
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+
+/**
+ * Provides an API to simulate the output of a stream.
+ * <p>
+ * Right now it is just rate, but in the future we expect to do data skew as well...
+ * </p>
+ */ +public class OutputStreamEngine { + private static final double NANO_PER_SEC = 1_000_000_000.0; + private static final long UPDATE_RATE_PERIOD_NS = ((long) NANO_PER_SEC * 2); + private static final String[] KEYS = new String[2048]; + + static { + //We get a new random number and seed it to make sure that runs are consistent where possible. + Random r = new Random(KEYS.length); + for (int i = 0; i < KEYS.length; i++) { + KEYS[i] = String.valueOf(r.nextDouble()); + } + } + + private long periodNano; + private long emitAmount; + private final Random rand; + private long nextEmitTime; + private long nextRateRandomizeTime; + private long emitsLeft; + private final OutputStream stats; + public final String streamName; + + /** + * Create an engine that can simulate the given stats. + * @param stats the stats to follow + */ + public OutputStreamEngine(OutputStream stats) { + this.stats = stats; + rand = ThreadLocalRandom.current(); + selectNewRate(); + //Start emitting right now + nextEmitTime = System.nanoTime(); + nextRateRandomizeTime = nextEmitTime + UPDATE_RATE_PERIOD_NS; + emitsLeft = emitAmount; + streamName = stats.id; + } + + private void selectNewRate() { + double ratePerSecond = stats.rate.nextRandom(rand); + if (ratePerSecond > 0) { + periodNano = Math.max(1, (long) (NANO_PER_SEC / ratePerSecond)); + emitAmount = Math.max(1, (long) ((ratePerSecond / NANO_PER_SEC) * periodNano)); + } else { + //if it is is 0 or less it really is 1 per 10 seconds. + periodNano = (long) NANO_PER_SEC * 10; + emitAmount = 1; + } + } + + /** + * Should we emit or not. + * @return the start time of the message, or null of nothing should be emitted. + */ + public Long shouldEmit() { + long time = System.nanoTime(); + if (emitsLeft <= 0 && nextEmitTime <= time) { + emitsLeft = emitAmount; + nextEmitTime = nextEmitTime + periodNano; + } + + if (nextRateRandomizeTime <= time) { + //Once every UPDATE_RATE_PERIOD_NS + selectNewRate(); + nextRateRandomizeTime = nextEmitTime + UPDATE_RATE_PERIOD_NS; + } + + if (emitsLeft > 0) { + emitsLeft--; + return nextEmitTime - periodNano; + } + return null; + } + + /** + * Get the next key to emit. + * @return the key that should be emitted. + */ + public String nextKey() { + int keyIndex; + if (stats.areKeysSkewed) { + //We set the stddev of the skewed keys to be 1/5 of the length, but then we use the absolute value + // of that so everything is skewed towards 0 + keyIndex = Math.min(KEYS.length - 1 , Math.abs((int) (rand.nextGaussian() * KEYS.length / 5))); + } else { + keyIndex = rand.nextInt(KEYS.length); + } + return KEYS[keyIndex]; + } + + public int nextInt(int bound) { + return rand.nextInt(bound); + } +} \ No newline at end of file diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/ScopedTopologySet.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/ScopedTopologySet.java new file mode 100644 index 00000000000..f7e79121453 --- /dev/null +++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/ScopedTopologySet.java @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.loadgen; + +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Set; +import org.apache.storm.generated.KillOptions; +import org.apache.storm.generated.Nimbus; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A set of topology names that will be killed when this is closed, or when the + * program exits. + */ +public class ScopedTopologySet extends HashSet implements AutoCloseable { + private static final Logger LOG = LoggerFactory.getLogger(ScopedTopologySet.class); + private static final KillOptions NO_WAIT_KILL = new KillOptions(); + + static { + NO_WAIT_KILL.set_wait_secs(0); + } + + private final Nimbus.Iface client; + private boolean closed = false; + + /** + * Constructor. + * @param client the client used to kill the topologies when this exist. + */ + public ScopedTopologySet(Nimbus.Iface client) { + this.client = client; + Runtime.getRuntime().addShutdownHook(new Thread(() -> { + try { + close(); + } catch (Exception e) { + LOG.error("Error trying to shutdown topologies on exit", e); + } + })); + } + + @Override + public boolean remove(Object o) { + throw new RuntimeException("Unmodifiable Set"); + } + + @Override + public void clear() { + throw new RuntimeException("Unmodifiable Set"); + } + + @Override + public boolean removeAll(Collection c) { + throw new RuntimeException("Unmodifiable Set"); + } + + @Override + public boolean retainAll(Collection c) { + throw new RuntimeException("Unmodifiable Set"); + } + + @Override + public void close() { + if (closed) { + return; + } + RuntimeException saved = null; + for (Iterator it = super.iterator(); it.hasNext();) { + String name = it.next(); + try { + client.killTopologyWithOpts(name, NO_WAIT_KILL); + it.remove(); + } catch (Exception e) { + RuntimeException wrapped = new RuntimeException("Error trying to kill " + name, e); + if (saved != null) { + saved.addSuppressed(wrapped); + } else { + saved = wrapped; + } + } + } + super.clear(); + if (saved != null) { + throw saved; + } + closed = true; + } +} diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/SlowExecutorPattern.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/SlowExecutorPattern.java new file mode 100644 index 00000000000..d2c3ac502c3 --- /dev/null +++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/SlowExecutorPattern.java @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.loadgen;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import org.apache.storm.utils.ObjectReader;
+
+/**
+ * A repeating pattern of skewedness in processing times. This is used to simulate an executor that slows down.
+ */
+public class SlowExecutorPattern implements Serializable {
+    private static final Pattern PARSER = Pattern.compile("\\s*(?<slowness>[^:]+)\\s*(?::\\s*(?<count>[0-9]+))?\\s*");
+    public final double maxSlownessMs;
+    public final int count;
+
+    /**
+     * Parses a string (command line) representation of "<SLOWNESS>(:<COUNT>)?".
+     * @param strRepresentation the string representation to parse
+     * @return the corresponding SlowExecutorPattern.
+     */
+    public static SlowExecutorPattern fromString(String strRepresentation) {
+        Matcher m = PARSER.matcher(strRepresentation);
+        if (!m.matches()) {
+            throw new IllegalArgumentException(strRepresentation + " is not in the form <SLOWNESS>(:<COUNT>)?");
+        }
+        double slownessMs = Double.valueOf(m.group("slowness"));
+        String c = m.group("count");
+        int count = c == null ? 1 : Integer.valueOf(c);
+        return new SlowExecutorPattern(slownessMs, count);
+    }
+
+    /**
+     * Creates a SlowExecutorPattern from a Map config.
+     * @param conf the conf to parse.
+     * @return the corresponding SlowExecutorPattern.
+     */
+    public static SlowExecutorPattern fromConf(Map<String, Object> conf) {
+        double slowness = ObjectReader.getDouble(conf.get("slownessMs"), 0.0);
+        int count = ObjectReader.getInt(conf.get("count"), 1);
+        return new SlowExecutorPattern(slowness, count);
+    }
+
+    /**
+     * Convert this to a Config map.
+     * @return the corresponding Config map to this.
+     */
+    public Map<String, Object> toConf() {
+        Map<String, Object> ret = new HashMap<>();
+        ret.put("slownessMs", maxSlownessMs);
+        ret.put("count", count);
+        return ret;
+    }
+
+    public SlowExecutorPattern(double maxSlownessMs, int count) {
+        this.count = count;
+        this.maxSlownessMs = maxSlownessMs;
+    }
+
+    public double getExtraSlowness(int index) {
+        return (index >= count) ? 0 : maxSlownessMs;
+    }
+
+}
diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/ThroughputVsLatency.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/ThroughputVsLatency.java
new file mode 100644
index 00000000000..b83c438e9b6
--- /dev/null
+++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/ThroughputVsLatency.java
@@ -0,0 +1,285 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.loadgen;
+
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.metric.LoggingMetricsConsumer;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.NimbusClient;
+import org.apache.storm.utils.Time;
+import org.apache.storm.utils.Utils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * WordCount but the spout goes at a predefined rate and we collect
+ * proper latency statistics.
+ */
+public class ThroughputVsLatency {
+    private static final Logger LOG = LoggerFactory.getLogger(ThroughputVsLatency.class);
+    private static final int TEST_EXECUTE_TIME_DEFAULT = 5;
+    private static final long DEFAULT_RATE_PER_SECOND = 500;
+    private static final String DEFAULT_TOPO_NAME = "wc-test";
+    private static final int DEFAULT_NUM_SPOUTS = 1;
+    private static final int DEFAULT_NUM_SPLITS = 1;
+    private static final int DEFAULT_NUM_COUNTS = 1;
+
+    public static class FastRandomSentenceSpout extends LoadSpout {
+        static final String[] SENTENCES = new String[] {
+            "the cow jumped over the moon",
+            "an apple a day keeps the doctor away",
+            "four score and seven years ago",
+            "snow white and the seven dwarfs",
+            "i am at two with nature"
+        };
+
+        /**
+         * Constructor.
+         * @param ratePerSecond the rate to emit tuples at.
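+         * <p>For illustration, {@code new FastRandomSentenceSpout(500)} aims for roughly
+         * 500 sentences per second in total on the default stream.</p>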
+ */ + public FastRandomSentenceSpout(long ratePerSecond) { + super(ratePerSecond); + } + + @Override + protected Values getNextValues(OutputStreamEngine se) { + String sentence = SENTENCES[se.nextInt(SENTENCES.length)]; + return new Values(sentence); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("sentence")); + } + } + + public static class SplitSentence extends BaseBasicBolt { + private ExecAndProcessLatencyEngine sleep; + private int executorIndex; + + public SplitSentence(SlowExecutorPattern slowness) { + super(); + sleep = new ExecAndProcessLatencyEngine(slowness); + } + + @Override + public void prepare(Map stormConf, + TopologyContext context) { + executorIndex = context.getThisTaskIndex(); + sleep.prepare(); + } + + @Override + public void execute(Tuple tuple, BasicOutputCollector collector) { + sleep.simulateProcessAndExecTime(executorIndex, Time.nanoTime(), null , () -> { + String sentence = tuple.getString(0); + for (String word: sentence.split("\\s+")) { + collector.emit(new Values(word, 1)); + } + }); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("word", "count")); + } + } + + public static class WordCount extends BaseBasicBolt { + Map counts = new HashMap<>(); + + @Override + public void execute(Tuple tuple, BasicOutputCollector collector) { + String word = tuple.getString(0); + Integer count = counts.get(word); + if (count == null) { + count = 0; + } + count++; + counts.put(word, count); + collector.emit(new Values(word, count)); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("word", "count")); + } + } + + /** + * The main entry point for ThroughputVsLatency. + * @param args the command line args + * @throws Exception on any error. + */ + public static void main(String[] args) throws Exception { + Options options = new Options(); + options.addOption(Option.builder("h") + .longOpt("help") + .desc("Print a help message") + .build()); + options.addOption(Option.builder("t") + .longOpt("test-time") + .argName("MINS") + .hasArg() + .desc("How long to run the tests for in mins (defaults to " + TEST_EXECUTE_TIME_DEFAULT + ")") + .build()); + options.addOption(Option.builder() + .longOpt("rate") + .argName("SENTENCES/SEC") + .hasArg() + .desc("How many sentences per second to run. (defaults to " + DEFAULT_RATE_PER_SECOND + ")") + .build()); + options.addOption(Option.builder() + .longOpt("name") + .argName("TOPO_NAME") + .hasArg() + .desc("Name of the topology to run (defaults to " + DEFAULT_TOPO_NAME + ")") + .build()); + options.addOption(Option.builder() + .longOpt("spouts") + .argName("NUM") + .hasArg() + .desc("Number of spouts to use (defaults to " + DEFAULT_NUM_SPOUTS + ")") + .build()); + options.addOption(Option.builder() + .longOpt("splitters") + .argName("NUM") + .hasArg() + .desc("Number of splitter bolts to use (defaults to " + DEFAULT_NUM_SPLITS + ")") + .build()); + options.addOption(Option.builder() + .longOpt("splitter-imbalance") + .argName("MS(:COUNT)?") + .hasArg() + .desc("The number of ms that the first COUNT splitters will wait before processing. 
This creates an imbalance " + + "that helps test load aware groupings (defaults to 0:1)") + .build()); + options.addOption(Option.builder() + .longOpt("counters") + .argName("NUM") + .hasArg() + .desc("Number of counter bolts to use (defaults to " + DEFAULT_NUM_COUNTS + ")") + .build()); + LoadMetricsServer.addCommandLineOptions(options); + CommandLineParser parser = new DefaultParser(); + CommandLine cmd = null; + Exception commandLineException = null; + SlowExecutorPattern slowness = null; + double numMins = TEST_EXECUTE_TIME_DEFAULT; + double ratePerSecond = DEFAULT_RATE_PER_SECOND; + String name = DEFAULT_TOPO_NAME; + int numSpouts = DEFAULT_NUM_SPOUTS; + int numSplits = DEFAULT_NUM_SPLITS; + int numCounts = DEFAULT_NUM_COUNTS; + try { + cmd = parser.parse(options, args); + if (cmd.hasOption("t")) { + numMins = Double.valueOf(cmd.getOptionValue("t")); + } + if (cmd.hasOption("rate")) { + ratePerSecond = Double.parseDouble(cmd.getOptionValue("rate")); + } + if (cmd.hasOption("name")) { + name = cmd.getOptionValue("name"); + } + if (cmd.hasOption("spouts")) { + numSpouts = Integer.parseInt(cmd.getOptionValue("spouts")); + } + if (cmd.hasOption("splitters")) { + numSplits = Integer.parseInt(cmd.getOptionValue("splitters")); + } + if (cmd.hasOption("counters")) { + numCounts = Integer.parseInt(cmd.getOptionValue("counters")); + } + if (cmd.hasOption("splitter-imbalance")) { + slowness = SlowExecutorPattern.fromString(cmd.getOptionValue("splitter-imbalance")); + } + } catch (ParseException | NumberFormatException e) { + commandLineException = e; + } + if (commandLineException != null || cmd.hasOption('h')) { + if (commandLineException != null) { + System.err.println("ERROR " + commandLineException.getMessage()); + } + new HelpFormatter().printHelp("ThroughputVsLatency [options]", options); + return; + } + + Map metrics = new LinkedHashMap<>(); + metrics.put("target_rate", ratePerSecond); + metrics.put("spout_parallel", numSpouts); + metrics.put("split_parallel", numSplits); + metrics.put("count_parallel", numCounts); + + Config conf = new Config(); + Map sysConf = Utils.readStormConfig(); + LoadMetricsServer metricServer = new LoadMetricsServer(sysConf, cmd, metrics); + metricServer.serve(); + String url = metricServer.getUrl(); + + NimbusClient client = NimbusClient.Builder.withConf(sysConf).build(); + conf.registerMetricsConsumer(LoggingMetricsConsumer.class); + conf.registerMetricsConsumer(HttpForwardingMetricsConsumer.class, url, 1); + Map workerMetrics = new HashMap<>(); + if (!NimbusClient.isLocalOverride()) { + //sigar uses JNI and does not work in local mode + workerMetrics.put("CPU", "org.apache.storm.metrics.sigar.CPUMetric"); + } + conf.put(Config.TOPOLOGY_WORKER_METRICS, workerMetrics); + conf.put(Config.TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS, 10); + + TopologyBuilder builder = new TopologyBuilder(); + + builder.setSpout("spout", new FastRandomSentenceSpout((long) ratePerSecond / numSpouts), numSpouts); + builder.setBolt("split", new SplitSentence(slowness), numSplits) + .shuffleGrouping("spout"); + builder.setBolt("count", new WordCount(), numCounts).fieldsGrouping("split", new Fields("word")); + + int exitStatus = -1; + try (ScopedTopologySet topologyNames = new ScopedTopologySet(client.getClient())) { + StormSubmitter.submitTopology(name, conf, builder.createTopology()); + topologyNames.add(name); + + metricServer.monitorFor(numMins, client.getClient(), topologyNames); + exitStatus = 0; + } catch (Exception e) { + LOG.error("Error while running test", e); + } finally { + 
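+            // exit explicitly so that any lingering non-daemon threads (for example the
+            // metrics server) cannot keep the JVM alive once the test is done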
System.exit(exitStatus); + } + } +} diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/TopologyLoadConf.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/TopologyLoadConf.java new file mode 100644 index 00000000000..c639091fc12 --- /dev/null +++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/TopologyLoadConf.java @@ -0,0 +1,505 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.loadgen; + +import java.io.File; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import org.apache.storm.Config; +import org.apache.storm.generated.GlobalStreamId; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.yaml.snakeyaml.LoaderOptions; +import org.yaml.snakeyaml.Yaml; +import org.yaml.snakeyaml.constructor.SafeConstructor; + +/** + * Configuration for a simulated topology. 
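+ *
+ * <p>For illustration only (a sketch, not from the original sources; the component fields shown
+ * are hypothetical and the real schema lives in LoadCompConf and InputStream), a file accepted
+ * by {@code fromConf(File)} is shaped roughly like:
+ * <pre>
+ * name: some_topology
+ * config:
+ *   topology.workers: 2
+ * spouts:
+ *   - id: spout_a
+ * bolts:
+ *   - id: bolt_a
+ * streams:
+ *   - from: spout_a
+ *     to: bolt_a
+ * </pre>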
+ */ +public class TopologyLoadConf { + private static final Logger LOG = LoggerFactory.getLogger(TopologyLoadConf.class); + static final Set IMPORTANT_CONF_KEYS = Collections.unmodifiableSet(new HashSet(Arrays.asList( + Config.TOPOLOGY_WORKERS, + Config.TOPOLOGY_ACKER_EXECUTORS, + Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, + Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, + Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, + Config.TOPOLOGY_DISABLE_LOADAWARE_MESSAGING, + Config.TOPOLOGY_DEBUG, + Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE, + Config.TOPOLOGY_ISOLATED_MACHINES, + Config.TOPOLOGY_MAX_SPOUT_PENDING, + Config.TOPOLOGY_MAX_TASK_PARALLELISM, + Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, + Config.TOPOLOGY_PRIORITY, + Config.TOPOLOGY_SCHEDULER_STRATEGY, + Config.TOPOLOGY_SHELLBOLT_MAX_PENDING, + Config.TOPOLOGY_SLEEP_SPOUT_WAIT_STRATEGY_TIME_MS, + Config.TOPOLOGY_SPOUT_WAIT_STRATEGY, + Config.TOPOLOGY_WORKER_CHILDOPTS, + Config.TOPOLOGY_WORKER_GC_CHILDOPTS, + Config.TOPOLOGY_WORKER_SHARED_THREAD_POOL_SIZE, + Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB + ))); + private static AtomicInteger topoUniquifier = new AtomicInteger(0); + + public final String name; + public final Map topoConf; + public final List spouts; + public final List bolts; + public final List streams; + private final AtomicInteger boltUniquifier = new AtomicInteger(0); + private final AtomicInteger spoutUniquifier = new AtomicInteger(0); + private final AtomicInteger streamUniquifier = new AtomicInteger(0); + + /** + * Parse the TopologyLoadConf from a file in YAML format. + * @param file the file to read from + * @return the parsed conf + * @throws IOException if there is an issue reading the file. + */ + public static TopologyLoadConf fromConf(File file) throws IOException { + Yaml yaml = new Yaml(new SafeConstructor(new LoaderOptions())); + Map yamlConf = (Map) yaml.load(new FileReader(file)); + return TopologyLoadConf.fromConf(yamlConf); + } + + /** + * Parse the TopologyLoadConf from a config map. + * @param conf the config with the TopologyLoadConf in it + * @return the parsed instance. + */ + public static TopologyLoadConf fromConf(Map conf) { + Map topoConf = null; + if (conf.containsKey("config")) { + topoConf = new HashMap<>((Map) conf.get("config")); + } + + List spouts = new ArrayList<>(); + for (Map spoutInfo: (List>) conf.get("spouts")) { + spouts.add(LoadCompConf.fromConf(spoutInfo)); + } + + List bolts = new ArrayList<>(); + List> boltInfos = (List>) conf.get("bolts"); + if (boltInfos != null) { + for (Map boltInfo : boltInfos) { + bolts.add(LoadCompConf.fromConf(boltInfo)); + } + } + + List streams = new ArrayList<>(); + List> streamInfos = (List>) conf.get("streams"); + if (streamInfos != null) { + for (Map streamInfo: streamInfos) { + streams.add(InputStream.fromConf(streamInfo)); + } + } + + return new TopologyLoadConf((String) conf.get("name"), topoConf, spouts, bolts, streams); + } + + /** + * Write this out to a file in YAML format. + * @param file the file to write to. + * @throws IOException if there is an error writing to the file. + */ + public void writeTo(File file) throws IOException { + Yaml yaml = new Yaml(new SafeConstructor(new LoaderOptions())); + try (FileWriter writer = new FileWriter(file)) { + yaml.dump(toConf(), writer); + } + } + + /** + * Convert this into a YAML String. + * @return this as a YAML String. 
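+     * (Note: the {@code SafeConstructor} passed to SnakeYAML only affects loading; when dumping,
+     * SnakeYAML uses a representer, so below it behaves the same as a plain {@code new Yaml()}.)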
+ */
+    public String toYamlString() {
+        Yaml yaml = new Yaml(new SafeConstructor(new LoaderOptions()));
+        StringWriter writer = new StringWriter();
+        yaml.dump(toConf(), writer);
+        return writer.toString();
+    }
+
+    /**
+     * Convert this into a Map config.
+     * @return this as a Map config.
+     */
+    public Map<String, Object> toConf() {
+        Map<String, Object> ret = new HashMap<>();
+        if (name != null) {
+            ret.put("name", name);
+        }
+        if (topoConf != null) {
+            ret.put("config", topoConf);
+        }
+        if (spouts != null && !spouts.isEmpty()) {
+            ret.put("spouts", spouts.stream().map(LoadCompConf::toConf)
+                .collect(Collectors.toList()));
+        }
+
+        if (bolts != null && !bolts.isEmpty()) {
+            ret.put("bolts", bolts.stream().map(LoadCompConf::toConf)
+                .collect(Collectors.toList()));
+        }
+
+        if (streams != null && !streams.isEmpty()) {
+            ret.put("streams", streams.stream().map(InputStream::toConf)
+                .collect(Collectors.toList()));
+        }
+        return ret;
+    }
+
+    /**
+     * Constructor.
+     * @param name the name of the topology.
+     * @param topoConf the config for the topology
+     * @param spouts the spouts for the topology
+     * @param bolts the bolts for the topology
+     * @param streams the streams for the topology
+     */
+    public TopologyLoadConf(String name, Map<String, Object> topoConf,
+                            List<LoadCompConf> spouts, List<LoadCompConf> bolts, List<InputStream> streams) {
+        this.name = name;
+        this.topoConf = topoConf;
+        this.spouts = spouts;
+        this.bolts = bolts;
+        this.streams = streams;
+    }
+
+    private static String getUniqueTopoName() {
+        return "topology_" + asCharString(topoUniquifier.getAndIncrement());
+    }
+
+    private String getUniqueBoltName() {
+        return "bolt_" + asCharString(boltUniquifier.getAndIncrement());
+    }
+
+    private String getUniqueSpoutName() {
+        return "spout_" + asCharString(spoutUniquifier.getAndIncrement());
+    }
+
+    private String getUniqueStreamName() {
+        return "stream_" + asCharString(streamUniquifier.getAndIncrement());
+    }
+
+    private static String asCharString(int value) {
+        int div = value / 26;
+        int remainder = value % 26;
+        String ret = "";
+        if (div > 0) {
+            ret = asCharString(div);
+        }
+        ret += (char) ((int) 'a' + remainder);
+        return ret;
+    }
+
+    public TopologyLoadConf withName(String baseName) {
+        return new TopologyLoadConf(baseName, topoConf, spouts, bolts, streams);
+    }
+
+    /**
+     * The first one that is not null.
+     * @param rest all the other somethings
+     * @param <V> whatever type you want.
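+     * (For example, {@code or(null, 2.0, 1.0)} returns 2.0; the scaling helpers below use this
+     * to let a per-component override win over the global default.)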
+     * @return the first one that is not null
+     */
+    static <V> V or(V... rest) {
+        for (V i: rest) {
+            if (i != null) {
+                return i;
+            }
+        }
+        return null;
+    }
+
+    LoadCompConf scaleCompParallel(LoadCompConf comp, double v, Map<String, Double> topoSpecificParallel) {
+        LoadCompConf ret = comp;
+        double scale = or(topoSpecificParallel.get(name + ":" + comp.id),
+            topoSpecificParallel.get(name + ":*"),
+            topoSpecificParallel.get("*:" + comp.id),
+            v);
+        if (scale != 1.0) {
+            ret = ret.scaleParallel(scale);
+        }
+        return ret;
+    }
+
+    LoadCompConf scaleCompThroughput(LoadCompConf comp, double v, Map<String, Double> topoSpecificParallel) {
+        LoadCompConf ret = comp;
+        double scale = or(topoSpecificParallel.get(name + ":" + comp.id),
+            topoSpecificParallel.get(name + ":*"),
+            topoSpecificParallel.get("*:" + comp.id),
+            v);
+        if (scale != 1.0) {
+            ret = ret.scaleThroughput(scale);
+        }
+        return ret;
+    }
+
+    private LoadCompConf overrideCompSlowExec(LoadCompConf comp, Map<String, SlowExecutorPattern> topoSpecific) {
+        LoadCompConf ret = comp;
+        SlowExecutorPattern slp = topoSpecific.get(name + ":" + comp.id);
+        if (slp != null) {
+            ret = ret.overrideSlowExecutorPattern(slp);
+        }
+        return ret;
+    }
+
+    /**
+     * Scale all of the components in the topology by a percentage (but keep the throughput the same).
+     * @param v the amount to scale them by. 1.0 is nothing, 0.5 cuts them in half, 2.0 doubles them.
+     * @param topoSpecific per-component scaling overrides (keyed by "topoName:compId").
+     * @return a copy of this with the needed adjustments made.
+     */
+    public TopologyLoadConf scaleParallel(double v, Map<String, Double> topoSpecific) {
+        if (v == 1.0 && (topoSpecific == null || topoSpecific.isEmpty())) {
+            return this;
+        }
+        List<LoadCompConf> scaledSpouts = spouts.stream().map((s) -> scaleCompParallel(s, v, topoSpecific))
+            .collect(Collectors.toList());
+        List<LoadCompConf> scaledBolts = bolts.stream().map((s) -> scaleCompParallel(s, v, topoSpecific))
+            .collect(Collectors.toList());
+        return new TopologyLoadConf(name, topoConf, scaledSpouts, scaledBolts, streams);
+    }
+
+    /**
+     * Scale the throughput of the entire topology by a percentage.
+     * @param v the amount to scale it by. 1.0 is nothing, 0.5 cuts it in half, and 2.0 doubles it.
+     * @param topoSpecific per-component scaling overrides (keyed by "topoName:compId").
+     * @return a copy of this with the needed adjustments made.
+     */
+    public TopologyLoadConf scaleThroughput(double v, Map<String, Double> topoSpecific) {
+        if (v == 1.0 && (topoSpecific == null || topoSpecific.isEmpty())) {
+            return this;
+        }
+        List<LoadCompConf> scaledSpouts = spouts.stream().map((s) -> scaleCompThroughput(s, v, topoSpecific))
+            .collect(Collectors.toList());
+        List<LoadCompConf> scaledBolts = bolts.stream().map((s) -> scaleCompThroughput(s, v, topoSpecific))
+            .collect(Collectors.toList());
+        return new TopologyLoadConf(name, topoConf, scaledSpouts, scaledBolts, streams);
+    }
+
+    /**
+     * Override the SlowExecutorPattern for given components.
+     * @param topoSpecific what we are going to use to override.
+     * @return a copy of this with the needed adjustments made.
+     */
+    public TopologyLoadConf overrideSlowExecs(Map<String, SlowExecutorPattern> topoSpecific) {
+        if (topoSpecific == null || topoSpecific.isEmpty()) {
+            return this;
+        }
+        List<LoadCompConf> modedSpouts = spouts.stream().map((s) -> overrideCompSlowExec(s, topoSpecific))
+            .collect(Collectors.toList());
+        List<LoadCompConf> modedBolts = bolts.stream().map((b) -> overrideCompSlowExec(b, topoSpecific))
+            .collect(Collectors.toList());
+        return new TopologyLoadConf(name, topoConf, modedSpouts, modedBolts, streams);
+    }
+
+    /**
+     * Create a new version of this topology with identifiable information removed.
+     * @return the anonymized version of the TopologyLoadConf.
+ */
+    public TopologyLoadConf anonymize() {
+        Map<String, String> remappedComponents = new HashMap<>();
+        Map<GlobalStreamId, GlobalStreamId> remappedStreams = new HashMap<>();
+        for (LoadCompConf comp: bolts) {
+            String newId = getUniqueBoltName();
+            remappedComponents.put(comp.id, newId);
+            if (comp.streams != null) {
+                for (OutputStream out : comp.streams) {
+                    GlobalStreamId orig = new GlobalStreamId(comp.id, out.id);
+                    GlobalStreamId remapped = new GlobalStreamId(newId, getUniqueStreamName());
+                    remappedStreams.put(orig, remapped);
+                }
+            }
+        }
+
+        for (LoadCompConf comp: spouts) {
+            String newId = getUniqueSpoutName();
+            remappedComponents.put(comp.id, newId);
+            if (comp.streams != null) {
+                for (OutputStream out : comp.streams) {
+                    GlobalStreamId orig = new GlobalStreamId(comp.id, out.id);
+                    GlobalStreamId remapped = new GlobalStreamId(newId, getUniqueStreamName());
+                    remappedStreams.put(orig, remapped);
+                }
+            }
+        }
+
+        for (InputStream in : streams) {
+            if (!remappedComponents.containsKey(in.toComponent)) {
+                remappedComponents.put(in.toComponent, getUniqueSpoutName());
+            }
+            GlobalStreamId orig = in.gsid();
+            if (!remappedStreams.containsKey(orig)) {
+                //Even if the topology is not valid we still need to remap it all
+                String remappedComp = remappedComponents.computeIfAbsent(in.fromComponent, (key) -> {
+                    LOG.warn("stream's {} from is not defined {}", in.id, in.fromComponent);
+                    return getUniqueBoltName();
+                });
+                remappedStreams.put(orig, new GlobalStreamId(remappedComp, getUniqueStreamName()));
+            }
+        }
+
+        //Now we need to map them all back again
+        List<LoadCompConf> remappedSpouts = spouts.stream()
+            .map((orig) -> orig.remap(remappedComponents, remappedStreams))
+            .collect(Collectors.toList());
+        List<LoadCompConf> remappedBolts = bolts.stream()
+            .map((orig) -> orig.remap(remappedComponents, remappedStreams))
+            .collect(Collectors.toList());
+        List<InputStream> remappedInputStreams = streams.stream()
+            .map((orig) -> orig.remap(remappedComponents, remappedStreams))
+            .collect(Collectors.toList());
+        return new TopologyLoadConf(getUniqueTopoName(), anonymizeTopoConf(topoConf), remappedSpouts, remappedBolts, remappedInputStreams);
+    }
+
+    private static Map<String, Object> anonymizeTopoConf(Map<String, Object> topoConf) {
+        //Only keep important conf keys
+        Map<String, Object> ret = new HashMap<>();
+        for (Map.Entry<String, Object> entry: topoConf.entrySet()) {
+            String key = entry.getKey();
+            Object value = entry.getValue();
+            if (IMPORTANT_CONF_KEYS.contains(key)) {
+                if (Config.TOPOLOGY_WORKER_CHILDOPTS.equals(key)
+                    || Config.TOPOLOGY_WORKER_GC_CHILDOPTS.equals(key)) {
+                    value = cleanupChildOpts(value);
+                }
+                ret.put(key, value);
+            }
+        }
+        return ret;
+    }
+
+    private static Object cleanupChildOpts(Object value) {
+        if (value instanceof String) {
+            String sv = (String) value;
+            StringBuilder ret = new StringBuilder();
+            for (String part: sv.split("\\s+")) {
+                if (part.startsWith("-X")) {
+                    ret.append(part).append(" ");
+                }
+            }
+            return ret.toString();
+        } else {
+            List<String> ret = new ArrayList<>();
+            for (String subValue: (Collection<String>) value) {
+                ret.add((String) cleanupChildOpts(subValue));
+            }
+            return ret.stream().filter((item) -> item != null && !item.isEmpty()).collect(Collectors.toList());
+        }
+    }
+
+    /**
+     * Try to see if this looks like a trident topology.
+     * NOTE: this will not work for anonymized configs
+     * @return true if it does else false.
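+     * <p>(Background: Trident generates component ids prefixed with {@code $mastercoord} and
+     * {@code $spoutcoord}, plus a {@code $batch} stream, which is exactly what the prefix
+     * checks below look for.)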
+ */ + public boolean looksLikeTrident() { + for (LoadCompConf spout: spouts) { + if (spout.id.startsWith("$mastercoord")) { + return true; + } + } + + for (LoadCompConf bolt: bolts) { + if (bolt.id.startsWith("$spoutcoord")) { + return true; + } + } + + for (InputStream in: streams) { + if (in.id.equals("$batch")) { + return true; + } + } + return false; + } + + /** + * Get the messages emitted per second in aggregate across all streams in the topology. + * @return messages per second. + */ + public double getAllEmittedAggregate() { + double ret = getSpoutEmittedAggregate(); + for (LoadCompConf bolt: bolts) { + ret += bolt.getAllEmittedAggregate(); + } + return ret; + } + + /** + * Get the messages emitted per second in aggregate for all of the spouts in the topology. + * @return messages per second. + */ + public double getSpoutEmittedAggregate() { + double ret = 0; + for (LoadCompConf spout: spouts) { + ret += spout.getAllEmittedAggregate(); + } + return ret; + } + + /** + * Try and guess at the actual number of messages emitted per second by a trident topology, not the number of batches. + * This does not work on an anonymized conf. + * @return messages per second or 0 if this does not look like a trident topology. + */ + public double getTridentEstimatedEmittedAggregate() { + //In this case we are ignoring the coord stuff, and only looking at + double ret = 0; + if (looksLikeTrident()) { + List all = new ArrayList<>(bolts); + all.addAll(spouts); + for (LoadCompConf comp : all) { + if (comp.id.startsWith("spout-")) { + if (comp.streams != null) { + for (OutputStream out: comp.streams) { + if (!out.id.startsWith("$") + && !out.id.startsWith("__") + && out.rate != null) { + ret += out.rate.mean * comp.parallelism; + } + } + } + } + } + } + return ret; + } + + public TopologyLoadConf replaceShuffleWithLocalOrShuffle() { + List modified = streams.stream().map((in) -> in.replaceShuffleWithLocalOrShuffle()).collect(Collectors.toList()); + return new TopologyLoadConf(name, topoConf, spouts, bolts, modified); + } +} diff --git a/examples/storm-loadgen/src/test/java/org/apache/storm/loadgen/LoadCompConfTest.java b/examples/storm-loadgen/src/test/java/org/apache/storm/loadgen/LoadCompConfTest.java new file mode 100644 index 00000000000..ed10692cdc4 --- /dev/null +++ b/examples/storm-loadgen/src/test/java/org/apache/storm/loadgen/LoadCompConfTest.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.storm.loadgen;
+
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+public class LoadCompConfTest {
+    @Test
+    public void scaleParallel() {
+        LoadCompConf orig = new LoadCompConf.Builder()
+            .withId("SOME_SPOUT")
+            .withParallelism(1)
+            .withStream(new OutputStream("default", new NormalDistStats(500.0, 100.0, 300.0, 600.0), false))
+            .build();
+        assertEquals(500.0, orig.getAllEmittedAggregate(), 0.001);
+        LoadCompConf scaled = orig.scaleParallel(2);
+        //Parallelism is doubled
+        assertEquals(2, scaled.parallelism);
+        assertEquals("SOME_SPOUT", scaled.id);
+        //But throughput is the same
+        assertEquals(500.0, scaled.getAllEmittedAggregate(), 0.001);
+    }
+
+    @Test
+    public void scaleThroughput() {
+        LoadCompConf orig = new LoadCompConf.Builder()
+            .withId("SOME_SPOUT")
+            .withParallelism(1)
+            .withStream(new OutputStream("default", new NormalDistStats(500.0, 100.0, 300.0, 600.0), false))
+            .build();
+        assertEquals(500.0, orig.getAllEmittedAggregate(), 0.001);
+        LoadCompConf scaled = orig.scaleThroughput(2.0);
+        //Parallelism is unchanged
+        assertEquals(1, scaled.parallelism);
+        assertEquals("SOME_SPOUT", scaled.id);
+        //But throughput is doubled
+        assertEquals(1000.0, scaled.getAllEmittedAggregate(), 0.001);
+    }
+}
\ No newline at end of file
diff --git a/examples/storm-loadgen/src/test/java/org/apache/storm/loadgen/LoadMetricsServerTest.java b/examples/storm-loadgen/src/test/java/org/apache/storm/loadgen/LoadMetricsServerTest.java
new file mode 100644
index 00000000000..7f549be1c0c
--- /dev/null
+++ b/examples/storm-loadgen/src/test/java/org/apache/storm/loadgen/LoadMetricsServerTest.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.storm.loadgen; + +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.apache.storm.loadgen.LoadMetricsServer.convert; + +public class LoadMetricsServerTest { + @Test + public void convertTest() { + for (TimeUnit from : TimeUnit.values()) { + for (TimeUnit to : TimeUnit.values()) { + assertEquals(1.0, convert(convert(1.0, from, to), to, from), 0.0001, + from + " to " + to + " and back"); + } + } + } +} \ No newline at end of file diff --git a/examples/storm-loadgen/src/test/java/org/apache/storm/loadgen/NormalDistStatsTest.java b/examples/storm-loadgen/src/test/java/org/apache/storm/loadgen/NormalDistStatsTest.java new file mode 100644 index 00000000000..3512bbae13d --- /dev/null +++ b/examples/storm-loadgen/src/test/java/org/apache/storm/loadgen/NormalDistStatsTest.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.loadgen; + +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class NormalDistStatsTest { + public static void assertNDSEquals(NormalDistStats a, NormalDistStats b) { + assertEquals(a.mean, b.mean, 0.0001, "mean"); + assertEquals(a.min, b.min, 0.0001, "min"); + assertEquals(a.max, b.max, 0.0001, "max"); + assertEquals(a.stddev, b.stddev, 0.0001, "stddev"); + } + + @Test + public void scaleBy() { + NormalDistStats orig = new NormalDistStats(1.0, 0.5, 0.0, 2.0); + assertNDSEquals(orig, orig.scaleBy(1.0)); + NormalDistStats expectedDouble = new NormalDistStats(2.0, 0.5, 1.0, 3.0); + assertNDSEquals(expectedDouble, orig.scaleBy(2.0)); + NormalDistStats expectedHalf = new NormalDistStats(0.5, 0.5, 0.0, 1.5); + assertNDSEquals(expectedHalf, orig.scaleBy(0.5)); + } + +} \ No newline at end of file diff --git a/examples/storm-loadgen/src/test/java/org/apache/storm/loadgen/OutputStreamTest.java b/examples/storm-loadgen/src/test/java/org/apache/storm/loadgen/OutputStreamTest.java new file mode 100644 index 00000000000..3980e2a7ab6 --- /dev/null +++ b/examples/storm-loadgen/src/test/java/org/apache/storm/loadgen/OutputStreamTest.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.loadgen;
+
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+public class OutputStreamTest {
+    @Test
+    public void scaleThroughput() {
+        OutputStream orig = new OutputStream("ID", new NormalDistStats(100.0, 1.0, 99.0, 101.0), false);
+        OutputStream scaled = orig.scaleThroughput(2.0);
+        assertEquals(orig.id, scaled.id);
+        assertEquals(orig.areKeysSkewed, scaled.areKeysSkewed);
+        assertEquals(200.0, scaled.rate.mean, 0.0001);
+        assertEquals(1.0, scaled.rate.stddev, 0.0001);
+        assertEquals(99.0 * 2.0 + 1.0, scaled.rate.min + 0.0, 0.0001);
+        assertEquals(201.0, scaled.rate.max, 0.0001);
+    }
+}
\ No newline at end of file
diff --git a/examples/storm-perf/README.markdown b/examples/storm-perf/README.markdown
new file mode 100644
index 00000000000..b0c4ffac788
--- /dev/null
+++ b/examples/storm-perf/README.markdown
@@ -0,0 +1,50 @@
+# Topologies for measuring Storm performance
+
+This module includes topologies designed for measuring Storm performance.
+
+## Overview
+There are two basic modes for running these topologies:
+
+- **Cluster mode:** Submits the topology to a Storm cluster. This mode is useful for benchmarking. It calculates throughput and latency numbers every minute and prints them on the console.
+- **In-process mode:** Uses LocalCluster to run the topology. This mode helps identify bottlenecks using profilers like JProfiler from within an IDE. This mode does not print metrics.
+
+In both modes, a shutdown hook is set up to terminate the topology when the submitting program is terminated.
+
+The bundled topologies can be classified into two types:
+
+- Topologies that measure purely the internal functioning of Storm. Such topologies do not interact with external systems like Kafka or HDFS.
+- Topologies that measure the speed of I/O with external systems like Kafka and HDFS.
+
+Topologies that measure internal performance can be run in either in-proc or cluster modes.
+Topologies that measure I/O with external systems are designed to run in cluster mode only.
+
+## Topologies List
+
+1. **ConstSpoutOnlyTopo:** Helps measure how fast a spout can emit. This topology has a spout and is not connected to any bolts. Supports cluster mode only.
+2. **ConstSpoutNullBoltTopo:** Helps measure how fast a spout can send data to a bolt. The spout emits a stream of constant values to a DevNull bolt, which discards the incoming tuples. Supports cluster mode only.
+3. **ConstSpoutIdBoltNullBoltTopo:** Helps measure the speed of messaging between spouts and bolts. The spout emits a stream of constant values to an ID bolt, which clones the tuple and emits it downstream to a DevNull bolt. Supports cluster mode only.
+4. **FileReadWordCountTopo:** Measures the speed of word counting. The spout loads a file into memory and emits its lines in an infinite loop. Supports cluster mode only.
+5. **HdfsSpoutNullBoltTopo:** Measures the speed at which HdfsSpout can read from HDFS. Supports cluster mode only.
+6. **StrGenSpoutHdfsBoltTopo:** Measures the speed at which HdfsBolt can write to HDFS. Supports cluster mode only.
+7. **KafkaClientHdfsTopo:** Measures how fast Storm can read from Kafka and write to HDFS, using the storm-kafka-client spout. Supports cluster mode only.
+8. **KafkaClientSpoutNullBoltTopo:** Measures the speed at which the storm-kafka-client KafkaSpout can read from Kafka. Supports cluster mode only.
+
+
+## How to run?
+
+### In-process mode:
+This mode is intended for running the topology quickly and easily from within the IDE, and it does not expect any command line arguments.
+Simply running the topology's main() method without arguments gets it running. The topology runs indefinitely until the program is terminated.
+
+
+### Cluster mode:
+When the topology is run with one or more command line arguments, it is submitted to the cluster.
+The first argument indicates how long the topology should be run. The second argument typically refers to a yaml config
+file which contains topology configuration settings. The conf/ directory in this module contains sample config files
+with names matching the corresponding topology.
+
+These topologies can be run using the standard storm jar command.
+
+```
+bin/storm jar /path/storm-perf-1.1.0-jar-with-dependencies.jar org.apache.storm.perf.ConstSpoutNullBoltTopo 200 conf/ConstSpoutNullBoltTopo.yaml
+```
\ No newline at end of file
diff --git a/examples/storm-perf/pom.xml b/examples/storm-perf/pom.xml
new file mode 100644
index 00000000000..74f96d8cd99
--- /dev/null
+++ b/examples/storm-perf/pom.xml
@@ -0,0 +1,134 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <artifactId>storm</artifactId>
+        <groupId>org.apache.storm</groupId>
+        <version>2.8.4-SNAPSHOT</version>
+        <relativePath>../../pom.xml</relativePath>
+    </parent>
+
+    <artifactId>storm-perf</artifactId>
+    <packaging>jar</packaging>
+    <name>Storm Perf</name>
+    <description>Topologies and tools to measure performance.</description>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <configuration>
+                    <createDependencyReducedPom>true</createDependencyReducedPom>
+                    <filters>
+                        <filter>
+                            <artifact>*:*</artifact>
+                            <excludes>
+                                <exclude>META-INF/*.SF</exclude>
+                                <exclude>META-INF/*.sf</exclude>
+                                <exclude>META-INF/*.DSA</exclude>
+                                <exclude>META-INF/*.dsa</exclude>
+                                <exclude>META-INF/*.RSA</exclude>
+                                <exclude>META-INF/*.rsa</exclude>
+                                <exclude>META-INF/*.EC</exclude>
+                                <exclude>META-INF/*.ec</exclude>
+                                <exclude>META-INF/MSFTSIG.SF</exclude>
+                                <exclude>META-INF/MSFTSIG.RSA</exclude>
+                            </excludes>
+                        </filter>
+                    </filters>
+                </configuration>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>exec-maven-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>exec</goal>
+                        </goals>
+                    </execution>
+                </executions>
+                <configuration>
+                    <executable>java</executable>
+                    <includeProjectDependencies>true</includeProjectDependencies>
+                    <includePluginDependencies>false</includePluginDependencies>
+                    <classpathScope>compile</classpathScope>
+                    <mainClass>${storm.topology}</mainClass>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-checkstyle-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-pmd-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-client</artifactId>
+            <version>${project.version}</version>
+            <scope>${provided.scope}</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-hdfs</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-kafka-client</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-clients</artifactId>
+            <scope>compile</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.jctools</groupId>
+            <artifactId>jctools-core</artifactId>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/examples/storm-perf/src/main/conf/ConstSpoutIdBoltNullBoltTopo.yaml b/examples/storm-perf/src/main/conf/ConstSpoutIdBoltNullBoltTopo.yaml
new file mode 100644
index 00000000000..9f74aee348a
--- /dev/null
+++ b/examples/storm-perf/src/main/conf/ConstSpoutIdBoltNullBoltTopo.yaml
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +spout.count : 1 +bolt1.count : 1 # IdBolt instances +bolt2.count : 1 # DevNullBolt instances + +# storm config overrides +topology.workers : 1 \ No newline at end of file diff --git a/examples/storm-perf/src/main/conf/ConstSpoutNullBoltTopo.yaml b/examples/storm-perf/src/main/conf/ConstSpoutNullBoltTopo.yaml new file mode 100644 index 00000000000..51f2dd7361a --- /dev/null +++ b/examples/storm-perf/src/main/conf/ConstSpoutNullBoltTopo.yaml @@ -0,0 +1,22 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +spout.count : 1 +bolt.count : 1 +grouping : "local" # either "shuffle" or "local" + +# storm config overrides +topology.workers : 1 \ No newline at end of file diff --git a/examples/storm-perf/src/main/conf/FileReadWordCountTopo.yaml b/examples/storm-perf/src/main/conf/FileReadWordCountTopo.yaml new file mode 100644 index 00000000000..61abe8f7de0 --- /dev/null +++ b/examples/storm-perf/src/main/conf/FileReadWordCountTopo.yaml @@ -0,0 +1,23 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +spout.count : 1 +splitter.count : 1 +counter.count : 1 +input.file : "/Users/roshan/Projects/idea/storm/storm-perf/src/main/resources/randomwords.txt" + +# storm config overrides +topology.workers : 1 \ No newline at end of file diff --git a/examples/storm-perf/src/main/conf/HdfsSpoutNullBoltTopo.yaml b/examples/storm-perf/src/main/conf/HdfsSpoutNullBoltTopo.yaml new file mode 100644 index 00000000000..a06ad6e1a03 --- /dev/null +++ b/examples/storm-perf/src/main/conf/HdfsSpoutNullBoltTopo.yaml @@ -0,0 +1,25 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +spout.count : 1 +bolt.count : 1 +hdfs.uri : "hdfs://hdfs.namenode:8020" +hdfs.source.dir : "/tmp/storm/in" +hdfs.archive.dir : "/tmp/storm/done" +hdfs.bad.dir : "/tmp/storm/bad" + +# storm config overrides +topology.workers : 1 \ No newline at end of file diff --git a/examples/storm-perf/src/main/conf/KafkaClientHdfsTopo.yaml b/examples/storm-perf/src/main/conf/KafkaClientHdfsTopo.yaml new file mode 100755 index 00000000000..2047d1b23e7 --- /dev/null +++ b/examples/storm-perf/src/main/conf/KafkaClientHdfsTopo.yaml @@ -0,0 +1,26 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +spout.count : 1 +bolt.count : 1 +kafka.topic : "kafka_topic" +kafka.bootstrap.hosts : "localhost:9092" +hdfs.uri : "hdfs://hdfs.namenode:8020" +hdfs.dir : "/tmp/storm" +hdfs.batch : 1000 + +# storm config overrides +topology.workers : 1 \ No newline at end of file diff --git a/examples/storm-perf/src/main/conf/KafkaClientSpoutNullBoltTopo.yml b/examples/storm-perf/src/main/conf/KafkaClientSpoutNullBoltTopo.yml new file mode 100644 index 00000000000..2fb18819ed9 --- /dev/null +++ b/examples/storm-perf/src/main/conf/KafkaClientSpoutNullBoltTopo.yml @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
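+
+# Note: processing.guarantee mirrors storm-kafka-client's ProcessingGuarantee enum
+# (AT_LEAST_ONCE, AT_MOST_ONCE, or NO_GUARANTEE), and offset.commit.period.ms controls
+# how often the spout commits offsets back to Kafka.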
+ +bootstrap.servers: "127.0.0.1:9092" +kafka.topic: "storm-perf-null-bolt-topic" +processing.guarantee: "AT_LEAST_ONCE" +offset.commit.period.ms: 30000 \ No newline at end of file diff --git a/examples/storm-perf/src/main/conf/StrGenSpoutHdfsBoltTopo.yaml b/examples/storm-perf/src/main/conf/StrGenSpoutHdfsBoltTopo.yaml new file mode 100644 index 00000000000..d16431b8fc9 --- /dev/null +++ b/examples/storm-perf/src/main/conf/StrGenSpoutHdfsBoltTopo.yaml @@ -0,0 +1,25 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +spout.count : 1 +bolt.count : 1 +hdfs.uri : "hdfs://hdfs.namenode:8020" +hdfs.dir : "/tmp/storm" +hdfs.batch : 1000 + + +# storm config overrides +topology.workers : 1 \ No newline at end of file diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/BackPressureTopo.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/BackPressureTopo.java new file mode 100644 index 00000000000..5fb70220eb9 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/BackPressureTopo.java @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf; + +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.generated.StormTopology; +import org.apache.storm.perf.spout.ConstSpout; +import org.apache.storm.perf.utils.Helper; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.BoltDeclarer; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.topology.base.BaseRichBolt; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.utils.ObjectReader; +import org.apache.storm.utils.Utils; +import org.slf4j.LoggerFactory; + + +public class BackPressureTopo { + + private static final String SPOUT_ID = "ConstSpout"; + private static final String BOLT_ID = "ThrottledBolt"; + private static final Integer SPOUT_COUNT = 1; + private static final Integer BOLT_COUNT = 1; + private static final String SLEEP_MS = "sleep"; + + static StormTopology getTopology(Map conf) { + + Long sleepMs = ObjectReader.getLong(conf.get(SLEEP_MS)); + // 1 - Setup Spout -------- + ConstSpout spout = new ConstSpout("some data").withOutputFields("string"); + + // 2 - Setup DevNull Bolt -------- + ThrottledBolt bolt = new ThrottledBolt(sleepMs); + + + // 3 - Setup Topology -------- + TopologyBuilder builder = new TopologyBuilder(); + + builder.setSpout(SPOUT_ID, spout, Helper.getInt(conf, SPOUT_COUNT, 1)); + BoltDeclarer bd = builder.setBolt(BOLT_ID, bolt, Helper.getInt(conf, BOLT_COUNT, 1)); + + bd.localOrShuffleGrouping(SPOUT_ID); + return builder.createTopology(); + } + + public static void main(String[] args) throws Exception { + int runTime = -1; + Config topoConf = new Config(); + topoConf.put(Config.TOPOLOGY_SPOUT_RECVQ_SKIPS, 1); + topoConf.putAll(Utils.readCommandLineOpts()); + if (args.length > 0) { + long sleepMs = Integer.parseInt(args[0]); + topoConf.put(SLEEP_MS, sleepMs); + } + if (args.length > 1) { + runTime = Integer.parseInt(args[1]); + } + if (args.length > 2) { + System.err.println("args: boltSleepMs [runDurationSec] "); + return; + } + // Submit topology to storm cluster + Helper.runOnClusterAndPrintMetrics(runTime, "BackPressureTopo", topoConf, getTopology(topoConf)); + } + + private static class ThrottledBolt extends BaseRichBolt { + private static final org.slf4j.Logger LOG = LoggerFactory.getLogger(ThrottledBolt.class); + private OutputCollector collector; + private long sleepMs; + + ThrottledBolt(Long sleepMs) { + this.sleepMs = sleepMs; + } + + @Override + public void prepare(Map topoConf, TopologyContext context, OutputCollector collector) { + this.collector = collector; + } + + @Override + public void execute(Tuple tuple) { + collector.ack(tuple); + LOG.debug("Sleeping"); + try { + Thread.sleep(sleepMs); + } catch (InterruptedException e) { + //.. 
ignore + } + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + } + } +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/ConstSpoutIdBoltNullBoltTopo.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/ConstSpoutIdBoltNullBoltTopo.java new file mode 100644 index 00000000000..c83763d6e26 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/ConstSpoutIdBoltNullBoltTopo.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf; + +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.generated.StormTopology; +import org.apache.storm.perf.bolt.DevNullBolt; +import org.apache.storm.perf.bolt.IdBolt; +import org.apache.storm.perf.spout.ConstSpout; +import org.apache.storm.perf.utils.Helper; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.utils.Utils; + +/** + * ConstSpout -> IdBolt -> DevNullBolt This topology measures speed of messaging between spouts->bolt and bolt->bolt ConstSpout : + * Continuously emits a constant string IdBolt : clones and emits input tuples DevNullBolt : discards incoming tuples. + */ +public class ConstSpoutIdBoltNullBoltTopo { + + public static final String TOPOLOGY_NAME = "ConstSpoutIdBoltNullBoltTopo"; + public static final String SPOUT_ID = "constSpout"; + public static final String BOLT1_ID = "idBolt"; + public static final String BOLT2_ID = "nullBolt"; + + // Configs + public static final String BOLT1_COUNT = "bolt1.count"; + public static final String BOLT2_COUNT = "bolt2.count"; + public static final String SPOUT_COUNT = "spout.count"; + + static StormTopology getTopology(Map conf) { + + // 1 - Setup Spout -------- + ConstSpout spout = new ConstSpout("some data").withOutputFields("str"); + + // 2 - Setup IdBolt & DevNullBolt -------- + IdBolt bolt1 = new IdBolt(); + DevNullBolt bolt2 = new DevNullBolt(); + + + // 3 - Setup Topology -------- + TopologyBuilder builder = new TopologyBuilder(); + + int numSpouts = Helper.getInt(conf, SPOUT_COUNT, 1); + builder.setSpout(SPOUT_ID, spout, numSpouts); + + int numBolt1 = Helper.getInt(conf, BOLT1_COUNT, 1); + builder.setBolt(BOLT1_ID, bolt1, numBolt1) + .localOrShuffleGrouping(SPOUT_ID); + + int numBolt2 = Helper.getInt(conf, BOLT2_COUNT, 1); + builder.setBolt(BOLT2_ID, bolt2, numBolt2) + .localOrShuffleGrouping(BOLT1_ID); + System.err.printf("====> Using : numSpouts = %d , numBolt1 = %d, numBolt2=%d\n", numSpouts, numBolt1, numBolt2); + return builder.createTopology(); + } + + + public static void main(String[] args) throws Exception { + int runTime = -1; + Config topoConf = new Config(); + // Configure for achieving max throughput in single worker mode (empirically found). 
+ // -- Expect ~5.3 mill/sec (3.2 mill/sec with batchSz=1) + // -- ~1 mill/sec, lat= ~20 microsec with acker=1 & batchSz=1 + topoConf.put(Config.TOPOLOGY_SPOUT_RECVQ_SKIPS, 8); + topoConf.put(Config.TOPOLOGY_PRODUCER_BATCH_SIZE, 500); + topoConf.put(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE, 50_000); + topoConf.put(Config.TOPOLOGY_DISABLE_LOADAWARE_MESSAGING, true); + topoConf.put(Config.TOPOLOGY_STATS_SAMPLE_RATE, 0.0005); + + if (args.length > 0) { + runTime = Integer.parseInt(args[0]); + } + if (args.length > 1) { + topoConf.putAll(Utils.findAndReadConfigFile(args[1])); + } + topoConf.putAll(Utils.readCommandLineOpts()); + + if (args.length > 2) { + System.err.println("args: [runDurationSec] [optionalConfFile]"); + return; + } + // Submit topology to storm cluster + Helper.runOnClusterAndPrintMetrics(runTime, TOPOLOGY_NAME, topoConf, getTopology(topoConf)); + } +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/ConstSpoutNullBoltTopo.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/ConstSpoutNullBoltTopo.java new file mode 100755 index 00000000000..63ef51b45c7 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/ConstSpoutNullBoltTopo.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf; + +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.generated.StormTopology; +import org.apache.storm.perf.bolt.DevNullBolt; +import org.apache.storm.perf.spout.ConstSpout; +import org.apache.storm.perf.utils.Helper; +import org.apache.storm.topology.BoltDeclarer; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.utils.Utils; + +/** + * This topo helps measure the messaging peak throughput between a spout and a bolt. + * + *
<p>Spout generates a stream of a fixed string. + * + *
<p>
Bolt will simply ack and discard the tuple received. + */ +public class ConstSpoutNullBoltTopo { + + public static final String TOPOLOGY_NAME = "ConstSpoutNullBoltTopo"; + public static final String SPOUT_ID = "constSpout"; + public static final String BOLT_ID = "nullBolt"; + + // Configs + public static final String BOLT_COUNT = "bolt.count"; + public static final String SPOUT_COUNT = "spout.count"; + public static final String GROUPING = "grouping"; // can be 'local' or 'shuffle' + + public static final String LOCAL_GROPING = "local"; + public static final String SHUFFLE_GROUPING = "shuffle"; + public static final String DEFAULT_GROUPING = LOCAL_GROPING; + + static StormTopology getTopology(Map conf) { + + // 1 - Setup Spout -------- + ConstSpout spout = new ConstSpout("some data").withOutputFields("str"); + + // 2 - Setup DevNull Bolt -------- + DevNullBolt bolt = new DevNullBolt(); + + + // 3 - Setup Topology -------- + TopologyBuilder builder = new TopologyBuilder(); + + int numSpouts = Helper.getInt(conf, SPOUT_COUNT, 1); + builder.setSpout(SPOUT_ID, spout, numSpouts); + + int numBolts = Helper.getInt(conf, BOLT_COUNT, 1); + BoltDeclarer bd = builder.setBolt(BOLT_ID, bolt, numBolts); + + System.err.printf("====> Using : numSpouts = %d , numBolts = %d\n", numSpouts, numBolts); + + String groupingType = Helper.getStr(conf, GROUPING); + if (groupingType == null || groupingType.equalsIgnoreCase(DEFAULT_GROUPING)) { + bd.localOrShuffleGrouping(SPOUT_ID); + } else if (groupingType.equalsIgnoreCase(SHUFFLE_GROUPING)) { + bd.shuffleGrouping(SPOUT_ID); + } + return builder.createTopology(); + } + + /** + * ConstSpout -> DevNullBolt with configurable grouping (default localOrShuffle). + */ + public static void main(String[] args) throws Exception { + int runTime = -1; + Config topoConf = new Config(); + // Configured for achieving max throughput in single worker mode (empirically found). + // For reference : numbers taken on MacBook Pro mid 2015 + // -- ACKer=0: ~8 mill/sec (batchSz=2k & recvQsize=50k). 
6.7 mill/sec (batchSz=1 & recvQsize=1k) + // -- ACKer=1: ~1 mill/sec, lat= ~1 microsec (batchSz=1 & bolt.wait.strategy=Park bolt.wait.park.micros=0) + // -- ACKer=1: ~1.3 mill/sec, lat= ~11 micros (batchSz=1 & receive.buffer.size=1k, bolt.wait & bp.wait = + // Progressive[defaults]) + // -- ACKer=1: ~1.6 mill/sec, lat= ~300 micros (batchSz=500 & bolt.wait.strategy=Park bolt.wait.park.micros=0) + topoConf.put(Config.TOPOLOGY_SPOUT_RECVQ_SKIPS, 8); + topoConf.put(Config.TOPOLOGY_PRODUCER_BATCH_SIZE, 500); + topoConf.put(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE, 50_000); + topoConf.put(Config.TOPOLOGY_DISABLE_LOADAWARE_MESSAGING, true); + topoConf.put(Config.TOPOLOGY_STATS_SAMPLE_RATE, 0.0005); + + if (args.length > 0) { + runTime = Integer.parseInt(args[0]); + } + if (args.length > 1) { + topoConf.putAll(Utils.findAndReadConfigFile(args[1])); + } + topoConf.putAll(Utils.readCommandLineOpts()); + + if (args.length > 2) { + System.err.println("args: [runDurationSec] [optionalConfFile]"); + return; + } + // Submit topology to storm cluster + Helper.runOnClusterAndPrintMetrics(runTime, TOPOLOGY_NAME, topoConf, getTopology(topoConf)); + } + +} + diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/ConstSpoutOnlyTopo.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/ConstSpoutOnlyTopo.java new file mode 100755 index 00000000000..1b012fe2fb8 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/ConstSpoutOnlyTopo.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf; + +import org.apache.storm.Config; +import org.apache.storm.generated.StormTopology; +import org.apache.storm.perf.spout.ConstSpout; +import org.apache.storm.perf.utils.Helper; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.utils.Utils; + +/** + * This topo helps measure how fast a spout can produce data (so no bolts are attached). + * + *
<p>
Spout generates a stream of a fixed string. + */ +public class ConstSpoutOnlyTopo { + + public static final String TOPOLOGY_NAME = "ConstSpoutOnlyTopo"; + public static final String SPOUT_ID = "constSpout"; + + + static StormTopology getTopology() { + + // 1 - Setup Const Spout -------- + ConstSpout spout = new ConstSpout("some data").withOutputFields("str"); + + // 2 - Setup Topology -------- + TopologyBuilder builder = new TopologyBuilder(); + builder.setSpout(SPOUT_ID, spout, 1); + return builder.createTopology(); + } + + /** + * ConstSpout only topology (No bolts). + */ + public static void main(String[] args) throws Exception { + int runTime = -1; + Config topoConf = new Config(); + if (args.length > 0) { + runTime = Integer.parseInt(args[0]); + } + if (args.length > 1) { + topoConf.putAll(Utils.findAndReadConfigFile(args[1])); + } + topoConf.put(Config.TOPOLOGY_SPOUT_RECVQ_SKIPS, 8); + topoConf.put(Config.TOPOLOGY_DISABLE_LOADAWARE_MESSAGING, true); + topoConf.put(Config.TOPOLOGY_STATS_SAMPLE_RATE, 0.0005); + topoConf.putAll(Utils.readCommandLineOpts()); + if (args.length > 2) { + System.err.println("args: [runDurationSec] [optionalConfFile]"); + return; + } + // Submit topology to storm cluster + Helper.runOnClusterAndPrintMetrics(runTime, TOPOLOGY_NAME, topoConf, getTopology()); + } +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/FileReadWordCountTopo.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/FileReadWordCountTopo.java new file mode 100644 index 00000000000..78eaab7eff7 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/FileReadWordCountTopo.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf; + +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.generated.StormTopology; +import org.apache.storm.perf.bolt.CountBolt; +import org.apache.storm.perf.bolt.SplitSentenceBolt; +import org.apache.storm.perf.spout.FileReadSpout; +import org.apache.storm.perf.utils.Helper; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.tuple.Fields; +import org.apache.storm.utils.Utils; + +/** + * This topo helps measure speed of word count. + * + *
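+ * <p>For example (illustrative, not in the original javadoc), a line {@code storm storm perf}
+ * is split into three word tuples, and the counter increments {@code storm} twice and {@code perf} once.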
<p>
Spout loads a file into memory on initialization, then emits the lines in an endless loop. + */ +public class FileReadWordCountTopo { + public static final String SPOUT_ID = "spout"; + public static final String COUNT_ID = "counter"; + public static final String SPLIT_ID = "splitter"; + public static final String TOPOLOGY_NAME = "FileReadWordCountTopo"; + + // Config settings + public static final String SPOUT_NUM = "spout.count"; + public static final String SPLIT_NUM = "splitter.count"; + public static final String COUNT_NUM = "counter.count"; + public static final String INPUT_FILE = "input.file"; + + public static final int DEFAULT_SPOUT_NUM = 1; + public static final int DEFAULT_SPLIT_BOLT_NUM = 2; + public static final int DEFAULT_COUNT_BOLT_NUM = 2; + + + static StormTopology getTopology(Map config) { + + final int spoutNum = Helper.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM); + final int spBoltNum = Helper.getInt(config, SPLIT_NUM, DEFAULT_SPLIT_BOLT_NUM); + final int cntBoltNum = Helper.getInt(config, COUNT_NUM, DEFAULT_COUNT_BOLT_NUM); + final String inputFile = Helper.getStr(config, INPUT_FILE); + + TopologyBuilder builder = new TopologyBuilder(); + builder.setSpout(SPOUT_ID, new FileReadSpout(inputFile), spoutNum); + builder.setBolt(SPLIT_ID, new SplitSentenceBolt(), spBoltNum).localOrShuffleGrouping(SPOUT_ID); + builder.setBolt(COUNT_ID, new CountBolt(), cntBoltNum).fieldsGrouping(SPLIT_ID, new Fields(SplitSentenceBolt.FIELDS)); + + return builder.createTopology(); + } + + public static void main(String[] args) throws Exception { + int runTime = -1; + Config topoConf = new Config(); + if (args.length > 0) { + runTime = Integer.parseInt(args[0]); + } + if (args.length > 1) { + topoConf.putAll(Utils.findAndReadConfigFile(args[1])); + } + topoConf.put(Config.TOPOLOGY_PRODUCER_BATCH_SIZE, 1000); + topoConf.put(Config.TOPOLOGY_BOLT_WAIT_STRATEGY, "org.apache.storm.policy.WaitStrategyPark"); + topoConf.put(Config.TOPOLOGY_BOLT_WAIT_PARK_MICROSEC, 0); + topoConf.put(Config.TOPOLOGY_DISABLE_LOADAWARE_MESSAGING, true); + topoConf.put(Config.TOPOLOGY_STATS_SAMPLE_RATE, 0.0005); + + topoConf.putAll(Utils.readCommandLineOpts()); + if (args.length > 2) { + System.err.println("args: [runDurationSec] [optionalConfFile]"); + return; + } + // Submit topology to storm cluster + Helper.runOnClusterAndPrintMetrics(runTime, TOPOLOGY_NAME, topoConf, getTopology(topoConf)); + } +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/HdfsSpoutNullBoltTopo.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/HdfsSpoutNullBoltTopo.java new file mode 100644 index 00000000000..9876cac9f3f --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/HdfsSpoutNullBoltTopo.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.perf; + +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.generated.StormTopology; +import org.apache.storm.hdfs.spout.HdfsSpout; +import org.apache.storm.hdfs.spout.TextFileReader; +import org.apache.storm.perf.bolt.DevNullBolt; +import org.apache.storm.perf.utils.Helper; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.utils.Utils; + +/** + * This topo helps measure speed of reading from Hdfs. + * + *
<p>
Spout Reads from Hdfs. + * + *
<p>
Bolt acks and discards tuples. + */ +public class HdfsSpoutNullBoltTopo { + public static final int DEFAULT_SPOUT_NUM = 1; + public static final int DEFAULT_BOLT_NUM = 1; + // names + static final String TOPOLOGY_NAME = "HdfsSpoutNullBoltTopo"; + static final String SPOUT_ID = "hdfsSpout"; + static final String BOLT_ID = "devNullBolt"; + // configs + static final String SPOUT_NUM = "spout.count"; + static final String BOLT_NUM = "bolt.count"; + static final String HDFS_URI = "hdfs.uri"; + static final String SOURCE_DIR = "hdfs.source.dir"; + static final String ARCHIVE_DIR = "hdfs.archive.dir"; + static final String BAD_DIR = "hdfs.bad.dir"; + + static StormTopology getTopology(Map config) { + + final int spoutNum = Helper.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM); + final int boltNum = Helper.getInt(config, BOLT_NUM, DEFAULT_BOLT_NUM); + final String fileFormat = Helper.getStr(config, "text"); + final String hdfsUri = Helper.getStr(config, HDFS_URI); + final String sourceDir = Helper.getStr(config, SOURCE_DIR); + final String archiveDir = Helper.getStr(config, ARCHIVE_DIR); + final String badDir = Helper.getStr(config, BAD_DIR); + + + // 1 - Setup Hdfs Spout -------- + HdfsSpout spout = new HdfsSpout() + .setReaderType(fileFormat) + .setHdfsUri(hdfsUri) + .setSourceDir(sourceDir) + .setArchiveDir(archiveDir) + .setBadFilesDir(badDir) + .withOutputFields(TextFileReader.defaultFields); + + // 2 - DevNull Bolt -------- + DevNullBolt bolt = new DevNullBolt(); + + // 3 - Setup Topology -------- + TopologyBuilder builder = new TopologyBuilder(); + builder.setSpout(SPOUT_ID, spout, spoutNum); + builder.setBolt(BOLT_ID, bolt, boltNum) + .localOrShuffleGrouping(SPOUT_ID); + + return builder.createTopology(); + } + + public static void main(String[] args) throws Exception { + if (args.length != 2) { + System.err.println("args: runDurationSec topConfFile"); + return; + } + + final Integer durationSec = Integer.parseInt(args[0]); + Config topoConf = new Config(); + topoConf.putAll(Utils.findAndReadConfigFile(args[1])); + topoConf.put(Config.TOPOLOGY_PRODUCER_BATCH_SIZE, 1000); + topoConf.put(Config.TOPOLOGY_BOLT_WAIT_STRATEGY, "org.apache.storm.policy.WaitStrategyPark"); + topoConf.put(Config.TOPOLOGY_BOLT_WAIT_PARK_MICROSEC, 0); + topoConf.put(Config.TOPOLOGY_DISABLE_LOADAWARE_MESSAGING, true); + topoConf.put(Config.TOPOLOGY_STATS_SAMPLE_RATE, 0.0005); + + topoConf.putAll(Utils.readCommandLineOpts()); + // Submit to Storm cluster + Helper.runOnClusterAndPrintMetrics(durationSec, TOPOLOGY_NAME, topoConf, getTopology(topoConf)); + } +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/KafkaClientHdfsTopo.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/KafkaClientHdfsTopo.java new file mode 100755 index 00000000000..5eb61b2e4e3 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/KafkaClientHdfsTopo.java @@ -0,0 +1,171 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf; + +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.generated.StormTopology; +import org.apache.storm.hdfs.bolt.HdfsBolt; +import org.apache.storm.hdfs.bolt.format.DefaultFileNameFormat; +import org.apache.storm.hdfs.bolt.format.FileNameFormat; +import org.apache.storm.hdfs.bolt.format.RecordFormat; +import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; +import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; +import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; +import org.apache.storm.hdfs.bolt.sync.SyncPolicy; +import org.apache.storm.kafka.spout.FirstPollOffsetStrategy; +import org.apache.storm.kafka.spout.KafkaSpout; +import org.apache.storm.kafka.spout.KafkaSpoutConfig; +import org.apache.storm.perf.utils.Helper; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.utils.ObjectReader; +import org.apache.storm.utils.Utils; + +/** + * This topo helps measure speed of reading from Kafka and writing to Hdfs. + * + *
<p>
Spout Reads from Kafka. + * + *
<p>
Bolt writes to Hdfs.
+ */
+public class KafkaClientHdfsTopo {
+
+    // configs - topo parallelism
+    public static final String SPOUT_NUM = "spout.count";
+    public static final String BOLT_NUM = "bolt.count";
+    // configs - kafka spout
+    public static final String KAFKA_TOPIC = "kafka.topic";
+    public static final String KAFKA_BOOTSTRAP_HOSTS = "kafka.bootstrap.hosts";
+    // configs - hdfs bolt
+    public static final String HDFS_URI = "hdfs.uri";
+    public static final String HDFS_PATH = "hdfs.dir";
+    public static final String HDFS_BATCH = "hdfs.batch";
+
+
+    public static final int DEFAULT_SPOUT_NUM = 1;
+    public static final int DEFAULT_BOLT_NUM = 1;
+    public static final int DEFAULT_HDFS_BATCH = 1000;
+
+    // names
+    public static final String TOPOLOGY_NAME = "KafkaHdfsTopo";
+    public static final String SPOUT_ID = "kafkaSpout";
+    public static final String BOLT_ID = "hdfsBolt";
+
+
+    static StormTopology getTopology(Map config) {
+
+        final int spoutNum = getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
+        final int boltNum = getInt(config, BOLT_NUM, DEFAULT_BOLT_NUM);
+
+        final int hdfsBatch = getInt(config, HDFS_BATCH, DEFAULT_HDFS_BATCH);
+
+        // 1 - Setup Kafka Spout --------
+        String bootstrapHosts = getStr(config, KAFKA_BOOTSTRAP_HOSTS);
+        String topicName = getStr(config, KAFKA_TOPIC);
+
+        KafkaSpoutConfig spoutConfig = KafkaSpoutConfig.builder(bootstrapHosts, topicName)
+                .setFirstPollOffsetStrategy(FirstPollOffsetStrategy.EARLIEST)
+                .build();
+
+        KafkaSpout spout = new KafkaSpout<>(spoutConfig);
+
+        // 2 - Setup HDFS Bolt --------
+        String hdfsUrls = getStr(config, HDFS_URI);
+        RecordFormat format = new LineWriter("value");
+        SyncPolicy syncPolicy = new CountSyncPolicy(hdfsBatch);
+        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(1.0f, FileSizeRotationPolicy.Units.GB);
+
+        FileNameFormat fileNameFormat = new DefaultFileNameFormat().withPath(getStr(config, HDFS_PATH));
+
+        // Instantiate the HdfsBolt
+        HdfsBolt bolt = new HdfsBolt()
+                .withFsUrl(hdfsUrls)
+                .withFileNameFormat(fileNameFormat)
+                .withRecordFormat(format)
+                .withRotationPolicy(rotationPolicy)
+                .withSyncPolicy(syncPolicy);
+
+
+        // 3 - Setup Topology --------
+        TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout(SPOUT_ID, spout, spoutNum);
+        builder.setBolt(BOLT_ID, bolt, boltNum)
+               .localOrShuffleGrouping(SPOUT_ID);
+
+        return builder.createTopology();
+    }
+
+
+    public static int getInt(Map map, Object key, int def) {
+        return ObjectReader.getInt(Utils.get(map, key, def));
+    }
+
+    public static String getStr(Map map, Object key) {
+        return (String) map.get(key);
+    }
+
+
+    /**
+     * Reads from the configured Kafka topic and writes each record's value to Hdfs, running on the cluster for the given duration.
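+     *
+     * <p>Example invocation (the jar and yaml file names here are illustrative, not defined by this code):
+     * {@code storm jar storm-perf.jar org.apache.storm.perf.KafkaClientHdfsTopo 300 KafkaHdfsTopo.yaml}
+     * where 300 is the run duration in seconds and the yaml file supplies keys such as kafka.bootstrap.hosts,
+     * kafka.topic, hdfs.uri and hdfs.dir.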
+ */ + public static void main(String[] args) throws Exception { + + if (args.length != 2) { + System.err.println("args: runDurationSec topConfFile"); + return; + } + + String confFile = args[1]; + Map topoConf = Utils.findAndReadConfigFile(confFile); + topoConf.put(Config.TOPOLOGY_PRODUCER_BATCH_SIZE, 1000); + topoConf.put(Config.TOPOLOGY_DISABLE_LOADAWARE_MESSAGING, true); + topoConf.put(Config.TOPOLOGY_STATS_SAMPLE_RATE, 0.0005); + topoConf.put(Config.TOPOLOGY_BOLT_WAIT_STRATEGY, "org.apache.storm.policy.WaitStrategyPark"); + topoConf.put(Config.TOPOLOGY_BOLT_WAIT_PARK_MICROSEC, 0); + + topoConf.putAll(Utils.readCommandLineOpts()); + // Submit topology to Storm cluster + Integer durationSec = Integer.parseInt(args[0]); + Helper.runOnClusterAndPrintMetrics(durationSec, TOPOLOGY_NAME, topoConf, getTopology(topoConf)); + } + + public static class LineWriter implements RecordFormat { + private String lineDelimiter = System.lineSeparator(); + private String fieldName; + + public LineWriter(String fieldName) { + this.fieldName = fieldName; + } + + /** + * Overrides the default record delimiter. + */ + public LineWriter withLineDelimiter(String delimiter) { + this.lineDelimiter = delimiter; + return this; + } + + @Override + public byte[] format(Tuple tuple) { + return (tuple.getValueByField(fieldName).toString() + this.lineDelimiter).getBytes(); + } + } +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/KafkaClientSpoutNullBoltTopo.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/KafkaClientSpoutNullBoltTopo.java new file mode 100644 index 00000000000..4ac1ed8e86e --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/KafkaClientSpoutNullBoltTopo.java @@ -0,0 +1,115 @@ +/* + * Copyright 2018 The Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.perf; + +import java.util.Map; +import java.util.Optional; +import org.apache.storm.Config; +import org.apache.storm.generated.StormTopology; +import org.apache.storm.kafka.spout.FirstPollOffsetStrategy; +import org.apache.storm.kafka.spout.KafkaSpout; +import org.apache.storm.kafka.spout.KafkaSpoutConfig; +import org.apache.storm.kafka.spout.KafkaSpoutConfig.ProcessingGuarantee; +import org.apache.storm.perf.bolt.DevNullBolt; +import org.apache.storm.perf.utils.Helper; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.utils.Utils; + +/** + * Benchmark topology for measuring spout read/emit/ack performance. The spout reads and emits tuples. The bolt acks and discards received + * tuples. 
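+ *
+ * <p>Config keys read by this topology, with defaults taken from the code below: {@code spout.count} (1), {@code bolt.count} (1),
+ * {@code bootstrap.servers} (127.0.0.1:9092), {@code kafka.topic} (storm-perf-null-bolt-topic),
+ * {@code processing.guarantee} (AT_LEAST_ONCE), {@code offset.commit.period.ms} (30000).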
+ */ +public class KafkaClientSpoutNullBoltTopo { + + // configs - topo parallelism + public static final String SPOUT_NUM = "spout.count"; + public static final String BOLT_NUM = "bolt.count"; + + // configs - kafka spout + public static final String BOOTSTRAP_SERVERS = "bootstrap.servers"; + public static final String KAFKA_TOPIC = "kafka.topic"; + public static final String PROCESSING_GUARANTEE = "processing.guarantee"; + public static final String OFFSET_COMMIT_PERIOD_MS = "offset.commit.period.ms"; + + public static final int DEFAULT_SPOUT_NUM = 1; + public static final int DEFAULT_BOLT_NUM = 1; + + // names + public static final String TOPOLOGY_NAME = KafkaClientSpoutNullBoltTopo.class.getSimpleName(); + public static final String SPOUT_ID = "kafkaSpout"; + public static final String BOLT_ID = "devNullBolt"; + + /** + * Create and configure the topology. + */ + public static StormTopology getTopology(Map config) { + + final int spoutNum = Helper.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM); + final int boltNum = Helper.getInt(config, BOLT_NUM, DEFAULT_BOLT_NUM); + // 1 - Setup Kafka Spout -------- + + String bootstrapServers = Optional.ofNullable(Helper.getStr(config, BOOTSTRAP_SERVERS)).orElse("127.0.0.1:9092"); + String kafkaTopic = Optional.ofNullable(Helper.getStr(config, KAFKA_TOPIC)).orElse("storm-perf-null-bolt-topic"); + ProcessingGuarantee processingGuarantee = ProcessingGuarantee.valueOf( + Optional.ofNullable(Helper.getStr(config, PROCESSING_GUARANTEE)) + .orElse(ProcessingGuarantee.AT_LEAST_ONCE.name())); + int offsetCommitPeriodMs = Helper.getInt(config, OFFSET_COMMIT_PERIOD_MS, 30_000); + + KafkaSpoutConfig kafkaSpoutConfig = KafkaSpoutConfig.builder(bootstrapServers, kafkaTopic) + .setProcessingGuarantee(processingGuarantee) + .setOffsetCommitPeriodMs(offsetCommitPeriodMs) + .setFirstPollOffsetStrategy( + FirstPollOffsetStrategy.EARLIEST) + .setTupleTrackingEnforced(true) + .build(); + + KafkaSpout spout = new KafkaSpout<>(kafkaSpoutConfig); + + // 2 - DevNull Bolt -------- + DevNullBolt bolt = new DevNullBolt(); + + // 3 - Setup Topology -------- + TopologyBuilder builder = new TopologyBuilder(); + builder.setSpout(SPOUT_ID, spout, spoutNum); + builder.setBolt(BOLT_ID, bolt, boltNum) + .localOrShuffleGrouping(SPOUT_ID); + + return builder.createTopology(); + } + + /** + * Start the topology. + */ + public static void main(String[] args) throws Exception { + int durationSec = -1; + Config topoConf = new Config(); + if (args.length > 0) { + durationSec = Integer.parseInt(args[0]); + } + if (args.length > 1) { + topoConf.putAll(Utils.findAndReadConfigFile(args[1])); + } + if (args.length > 2) { + System.err.println("args: [runDurationSec] [optionalConfFile]"); + return; + } + + // Submit to Storm cluster + Helper.runOnClusterAndPrintMetrics(durationSec, TOPOLOGY_NAME, topoConf, getTopology(topoConf)); + } + +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/LowThroughputTopo.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/LowThroughputTopo.java new file mode 100644 index 00000000000..71ffd7caeae --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/LowThroughputTopo.java @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package org.apache.storm.perf;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import org.apache.storm.Config;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.perf.utils.Helper;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.BoltDeclarer;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.utils.ObjectReader;
+import org.apache.storm.utils.Utils;
+import org.slf4j.LoggerFactory;
+
+public class LowThroughputTopo {
+    private static final String SPOUT_ID = "ThrottledSpout";
+    private static final String BOLT_ID = "LatencyPrintBolt";
+    private static final Integer SPOUT_COUNT = 1;
+    private static final Integer BOLT_COUNT = 1;
+    private static final String SLEEP_MS = "sleep";
+
+    static StormTopology getTopology(Map conf) {
+
+        Long sleepMs = ObjectReader.getLong(conf.get(SLEEP_MS));
+        // 1 - Setup Spout --------
+        ThrottledSpout spout = new ThrottledSpout(sleepMs).withOutputFields(ThrottledSpout.DEFAULT_FIELD_NAME);
+
+        // 2 - Setup DevNull Bolt --------
+        LatencyPrintBolt bolt = new LatencyPrintBolt();
+
+
+        // 3 - Setup Topology --------
+        TopologyBuilder builder = new TopologyBuilder();
+
+        builder.setSpout(SPOUT_ID, spout, Helper.getInt(conf, SPOUT_COUNT, 1));
+        BoltDeclarer bd = builder.setBolt(BOLT_ID, bolt, Helper.getInt(conf, BOLT_COUNT, 1));
+
+        bd.localOrShuffleGrouping(SPOUT_ID);
+        // bd.shuffleGrouping(SPOUT_ID);
+        return builder.createTopology();
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length == 0 || args.length > 2) {
+            System.err.println("args: spoutSleepMs [runDurationSec]");
+            return;
+        }
+        int runTime = -1;
+        Map topoConf = new Config();
+        topoConf.put(Config.TOPOLOGY_SPOUT_RECVQ_SKIPS, 1);
+        long sleepMs = Integer.parseInt(args[0]);
+        topoConf.put(SLEEP_MS, sleepMs);
+        if (args.length > 1) {
+            runTime = Integer.parseInt(args[1]);
+        }
+        topoConf.putAll(Utils.readCommandLineOpts());
+        // Submit topology to storm cluster
+        Helper.runOnClusterAndPrintMetrics(runTime, "LowThroughputTopo", topoConf, getTopology(topoConf));
+    }
+
+    private static class ThrottledSpout extends BaseRichSpout {
+
+        static final String DEFAULT_FIELD_NAME = "time";
+        private String fieldName = DEFAULT_FIELD_NAME;
+        private SpoutOutputCollector collector = null;
+        private long sleepTimeMs;
+
+        ThrottledSpout(long sleepMs) {
+            this.sleepTimeMs = sleepMs;
+        }
+
+        public ThrottledSpout withOutputFields(String fieldName) {
+            this.fieldName = fieldName;
+            return this;
+        }
+
+        @Override
+        public void
declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields(fieldName)); + } + + @Override + public void open(Map conf, TopologyContext context, + SpoutOutputCollector collector) { + this.collector = collector; + } + + @Override + public void nextTuple() { + Long now = System.currentTimeMillis(); + List tuple = Collections.singletonList(now); + collector.emit(tuple, now); + Utils.sleep(sleepTimeMs); + } + + @Override + public void ack(Object msgId) { + super.ack(msgId); + } + } + + private static class LatencyPrintBolt extends BaseRichBolt { + private static final org.slf4j.Logger LOG = LoggerFactory.getLogger(LatencyPrintBolt.class); + private OutputCollector collector; + + @Override + public void prepare(Map topoConf, + TopologyContext context, + OutputCollector collector) { + this.collector = collector; + } + + @Override + public void execute(Tuple tuple) { + Long now = System.currentTimeMillis(); + Long then = (Long) tuple.getValues().get(0); + LOG.warn("Latency {} ", now - then); + System.err.println(now - then); + collector.ack(tuple); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + + } + } +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/SimplifiedWordCountTopo.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/SimplifiedWordCountTopo.java new file mode 100644 index 00000000000..0a9148c593d --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/SimplifiedWordCountTopo.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package org.apache.storm.perf;
+
+import java.util.Map;
+import org.apache.storm.Config;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.perf.bolt.CountBolt;
+import org.apache.storm.perf.spout.WordGenSpout;
+import org.apache.storm.perf.utils.Helper;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.utils.Utils;
+
+public class SimplifiedWordCountTopo {
+
+    public static final String SPOUT_ID = "spout";
+    public static final String COUNT_ID = "counter";
+    public static final String TOPOLOGY_NAME = "SimplifiedWordCountTopo";
+
+    // Config settings
+    public static final String SPOUT_NUM = "spout.count";
+    public static final String BOLT_NUM = "counter.count";
+    public static final String INPUT_FILE = "input.file";
+
+    public static final int DEFAULT_SPOUT_NUM = 1;
+    public static final int DEFAULT_COUNT_BOLT_NUM = 1;
+
+
+    static StormTopology getTopology(Map config) {
+
+        final int spoutNum = Helper.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
+        final int cntBoltNum = Helper.getInt(config, BOLT_NUM, DEFAULT_COUNT_BOLT_NUM);
+        final String inputFile = Helper.getStr(config, INPUT_FILE);
+
+        TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout(SPOUT_ID, new WordGenSpout(inputFile), spoutNum);
+        builder.setBolt(COUNT_ID, new CountBolt(), cntBoltNum).fieldsGrouping(SPOUT_ID, new Fields(WordGenSpout.FIELDS));
+
+        return builder.createTopology();
+    }
+
+    // Topology: WordGenSpout -> FieldsGrouping -> CountBolt
+    public static void main(String[] args) throws Exception {
+        int runTime = -1;
+        Config topoConf = new Config();
+        if (args.length > 3 || args.length == 0) {
+            System.err.println("args: file.txt [runDurationSec] [optionalConfFile]");
+            return;
+        }
+        topoConf.put(INPUT_FILE, args[0]);
+        if (args.length > 1) {
+            runTime = Integer.parseInt(args[1]);
+        }
+        if (args.length > 2) {
+            topoConf.putAll(Utils.findAndReadConfigFile(args[2]));
+        }
+        topoConf.put(Config.TOPOLOGY_SPOUT_RECVQ_SKIPS, 8);
+        topoConf.put(Config.TOPOLOGY_PRODUCER_BATCH_SIZE, 1000);
+        topoConf.put(Config.TOPOLOGY_DISABLE_LOADAWARE_MESSAGING, true);
+        topoConf.put(Config.TOPOLOGY_STATS_SAMPLE_RATE, 0.0005);
+        topoConf.put(Config.TOPOLOGY_BOLT_WAIT_STRATEGY, "org.apache.storm.policy.WaitStrategyPark");
+        topoConf.put(Config.TOPOLOGY_BOLT_WAIT_PARK_MICROSEC, 0);
+
+        topoConf.putAll(Utils.readCommandLineOpts());
+        // Submit topology to storm cluster
+        Helper.runOnClusterAndPrintMetrics(runTime, TOPOLOGY_NAME, topoConf, getTopology(topoConf));
+    }
+}
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/StrGenSpoutHdfsBoltTopo.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/StrGenSpoutHdfsBoltTopo.java
new file mode 100755
index 00000000000..0c53db97c6f
--- /dev/null
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/StrGenSpoutHdfsBoltTopo.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License + */ + + +package org.apache.storm.perf; + +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.generated.StormTopology; +import org.apache.storm.hdfs.bolt.HdfsBolt; +import org.apache.storm.hdfs.bolt.format.DefaultFileNameFormat; +import org.apache.storm.hdfs.bolt.format.FileNameFormat; +import org.apache.storm.hdfs.bolt.format.RecordFormat; +import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; +import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; +import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; +import org.apache.storm.hdfs.bolt.sync.SyncPolicy; +import org.apache.storm.perf.spout.StringGenSpout; +import org.apache.storm.perf.utils.Helper; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.utils.Utils; + +/** + * This topo helps measure speed of writing to Hdfs. + * + *
<p>
Spout generates fixed length random strings. + * + *
<p>
Bolt writes to Hdfs. + */ +public class StrGenSpoutHdfsBoltTopo { + + // configs - topo parallelism + public static final String SPOUT_NUM = "spout.count"; + public static final String BOLT_NUM = "bolt.count"; + + // configs - hdfs bolt + public static final String HDFS_URI = "hdfs.uri"; + public static final String HDFS_PATH = "hdfs.dir"; + public static final String HDFS_BATCH = "hdfs.batch"; + + public static final int DEFAULT_SPOUT_NUM = 1; + public static final int DEFAULT_BOLT_NUM = 1; + public static final int DEFAULT_HDFS_BATCH = 1000; + + // names + public static final String TOPOLOGY_NAME = "StrGenSpoutHdfsBoltTopo"; + public static final String SPOUT_ID = "GenSpout"; + public static final String BOLT_ID = "hdfsBolt"; + + + static StormTopology getTopology(Map topoConf) { + final int hdfsBatch = Helper.getInt(topoConf, HDFS_BATCH, DEFAULT_HDFS_BATCH); + + // 1 - Setup StringGen Spout -------- + StringGenSpout spout = new StringGenSpout(100).withFieldName("str"); + + + // 2 - Setup HFS Bolt -------- + String hdfsUrl = Helper.getStr(topoConf, HDFS_URI); + RecordFormat format = new LineWriter("str"); + SyncPolicy syncPolicy = new CountSyncPolicy(hdfsBatch); + FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(1.0f, FileSizeRotationPolicy.Units.GB); + final int spoutNum = Helper.getInt(topoConf, SPOUT_NUM, DEFAULT_SPOUT_NUM); + final int boltNum = Helper.getInt(topoConf, BOLT_NUM, DEFAULT_BOLT_NUM); + + // Use default, Storm-generated file names + FileNameFormat fileNameFormat = new DefaultFileNameFormat().withPath(Helper.getStr(topoConf, HDFS_PATH)); + + // Instantiate the HdfsBolt + HdfsBolt bolt = new HdfsBolt() + .withFsUrl(hdfsUrl) + .withFileNameFormat(fileNameFormat) + .withRecordFormat(format) + .withRotationPolicy(rotationPolicy) + .withSyncPolicy(syncPolicy); + + + // 3 - Setup Topology -------- + + TopologyBuilder builder = new TopologyBuilder(); + builder.setSpout(SPOUT_ID, spout, spoutNum); + builder.setBolt(BOLT_ID, bolt, boltNum) + .localOrShuffleGrouping(SPOUT_ID); + + return builder.createTopology(); + } + + + /** + * Spout generates random strings and HDFS bolt writes them to a text file. + */ + public static void main(String[] args) throws Exception { + String confFile = "conf/HdfsSpoutTopo.yaml"; + int runTime = -1; //Run until Ctrl-C + if (args.length > 0) { + runTime = Integer.parseInt(args[0]); + } + + if (args.length > 1) { + confFile = args[1]; + } + + // Submit to Storm cluster + if (args.length > 2) { + System.err.println("args: [runDurationSec] [confFile]"); + return; + } + + Map topoConf = Utils.findAndReadConfigFile(confFile); + topoConf.put(Config.TOPOLOGY_PRODUCER_BATCH_SIZE, 1000); + topoConf.put(Config.TOPOLOGY_BOLT_WAIT_STRATEGY, "org.apache.storm.policy.WaitStrategyPark"); + topoConf.put(Config.TOPOLOGY_BOLT_WAIT_PARK_MICROSEC, 0); + topoConf.put(Config.TOPOLOGY_DISABLE_LOADAWARE_MESSAGING, true); + topoConf.put(Config.TOPOLOGY_STATS_SAMPLE_RATE, 0.0005); + + topoConf.putAll(Utils.readCommandLineOpts()); + Helper.runOnClusterAndPrintMetrics(runTime, TOPOLOGY_NAME, topoConf, getTopology(topoConf)); + } + + + public static class LineWriter implements RecordFormat { + private static final long serialVersionUID = 7524288317405514146L; + private String lineDelimiter = System.lineSeparator(); + private String fieldName; + + public LineWriter(String fieldName) { + this.fieldName = fieldName; + } + + /** + * Overrides the default record delimiter. 
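+     *
+     * <p>For example, {@code new LineWriter("str").withLineDelimiter("|")} writes records separated by pipes instead of newlines.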
+         */
+        public LineWriter withLineDelimiter(String delimiter) {
+            this.lineDelimiter = delimiter;
+            return this;
+        }
+
+        @Override
+        public byte[] format(Tuple tuple) {
+            return (tuple.getValueByField(fieldName).toString() + this.lineDelimiter).getBytes();
+        }
+    }
+
+}
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/ThroughputMeter.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/ThroughputMeter.java
new file mode 100644
index 00000000000..3d8e736224e
--- /dev/null
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/ThroughputMeter.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package org.apache.storm.perf;
+
+public class ThroughputMeter {
+
+    private String name;
+    private long startTime = 0;
+    private int count;
+    private long endTime = 0;
+
+    public ThroughputMeter(String name) {
+        this.name = name;
+        this.startTime = System.currentTimeMillis();
+    }
+
+    /**
+     * Calculate throughput.
+     * @return events/sec
+     */
+    private static double calcThroughput(long count, long startTime, long endTime) {
+        long gap = endTime - startTime;
+        if (gap <= 0) {
+            return 0; // avoid division by zero on zero-length intervals
+        }
+        return (count * 1000.0) / gap; // floating-point math, so sub-second gaps don't truncate to zero
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public void record() {
+        ++count;
+    }
+
+    public double stop() {
+        if (startTime == 0) {
+            return 0;
+        }
+        if (endTime == 0) {
+            this.endTime = System.currentTimeMillis();
+        }
+        return calcThroughput(count, startTime, endTime);
+    }
+
+    // Returns the recorded throughput since the last call to getCurrentThroughput(),
+    // or since this meter was instantiated if called for the first time.
+    public double getCurrentThroughput() {
+        if (startTime == 0) {
+            return 0;
+        }
+        long currTime = (endTime == 0) ? System.currentTimeMillis() : endTime;
+
+        double result = calcThroughput(count, startTime, currTime) / 1000; // K/sec
+        startTime = currTime;
+        count = 0;
+        return result;
+    }
+}
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/bolt/CountBolt.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/bolt/CountBolt.java
new file mode 100644
index 00000000000..ee1bf7ee1f7
--- /dev/null
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/bolt/CountBolt.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.bolt; + +import java.util.HashMap; +import java.util.Map; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.BasicOutputCollector; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseBasicBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; + + +public class CountBolt extends BaseBasicBolt { + public static final String FIELDS_WORD = "word"; + public static final String FIELDS_COUNT = "count"; + + Map counts = new HashMap<>(); + + @Override + public void prepare(Map topoConf, TopologyContext context) { + } + + @Override + public void execute(Tuple tuple, BasicOutputCollector collector) { + String word = tuple.getString(0); + Integer count = counts.get(word); + if (count == null) { + count = 0; + } + count++; + counts.put(word, count); + collector.emit(new Values(word, count)); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields(FIELDS_WORD, FIELDS_COUNT)); + } +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/bolt/DevNullBolt.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/bolt/DevNullBolt.java new file mode 100755 index 00000000000..cc181c04c47 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/bolt/DevNullBolt.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.bolt; + +import java.util.Map; +import java.util.concurrent.locks.LockSupport; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichBolt; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.utils.ObjectReader; +import org.slf4j.LoggerFactory; + + +public class DevNullBolt extends BaseRichBolt { + private static final org.slf4j.Logger LOG = LoggerFactory.getLogger(DevNullBolt.class); + private OutputCollector collector; + private Long sleepNanos; + private int count = 0; + + @Override + public void prepare(Map topoConf, TopologyContext context, OutputCollector collector) { + this.collector = collector; + this.sleepNanos = ObjectReader.getLong(topoConf.get("nullbolt.sleep.micros"), 0L) * 1_000; + } + + @Override + public void execute(Tuple tuple) { + collector.ack(tuple); + if (sleepNanos > 0) { + LockSupport.parkNanos(sleepNanos); + } + ++count; + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + + } +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/bolt/IdBolt.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/bolt/IdBolt.java new file mode 100644 index 00000000000..5387499bb8f --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/bolt/IdBolt.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.bolt; + +import java.util.Map; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; + +public class IdBolt extends BaseRichBolt { + private OutputCollector collector; + + @Override + public void prepare(Map topoConf, TopologyContext context, OutputCollector collector) { + this.collector = collector; + } + + @Override + public void execute(Tuple tuple) { + collector.emit(tuple, new Values(tuple.getValues())); + collector.ack(tuple); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("field1")); + } +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/bolt/SplitSentenceBolt.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/bolt/SplitSentenceBolt.java new file mode 100644 index 00000000000..85a3f5aa9d4 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/bolt/SplitSentenceBolt.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package org.apache.storm.perf.bolt;
+
+import java.util.Map;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+
+
+public class SplitSentenceBolt extends BaseBasicBolt {
+    public static final String FIELDS = "word";
+
+    public static String[] splitSentence(String sentence) {
+        if (sentence != null) {
+            return sentence.split("\\s+");
+        }
+        return null;
+    }
+
+    @Override
+    public void prepare(Map topoConf, TopologyContext context) {
+    }
+
+    @Override
+    public void execute(Tuple input, BasicOutputCollector collector) {
+        String[] words = splitSentence(input.getString(0));
+        if (words == null) {
+            return; // a null sentence yields nothing to emit
+        }
+        for (String word : words) {
+            collector.emit(new Values(word));
+        }
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields(FIELDS));
+    }
+}
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Acker.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Acker.java
new file mode 100644
index 00000000000..adcc3a3d6bb
--- /dev/null
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Acker.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package org.apache.storm.perf.queuetest;
+
+import org.apache.storm.utils.JCQueue;
+
+/**
+ * Reads from ackerInQ and writes to spout queue.
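+ *
+ * <p>Together with AckingProducer this forms a closed loop that mimics Storm's spout/acker message path:
+ * the producer publishes events to ackerInQ, and this thread forwards each consumed event back to spoutInQ.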
+ */
+class Acker extends MyThread {
+    private final JCQueue ackerInQ;
+    private final JCQueue spoutInQ;
+
+    Acker(JCQueue ackerInQ, JCQueue spoutInQ) {
+        super("Acker");
+        this.ackerInQ = ackerInQ;
+        this.spoutInQ = spoutInQ;
+    }
+
+
+    @Override
+    public void run() {
+        long start = System.currentTimeMillis();
+        Handler handler = new Handler();
+        while (!Thread.interrupted()) {
+            ackerInQ.consume(handler);
+        }
+        runTime = System.currentTimeMillis() - start;
+    }
+
+    private class Handler implements JCQueue.Consumer {
+        @Override
+        public void accept(Object event) {
+            try {
+                spoutInQ.publish(event);
+            } catch (InterruptedException e) {
+                throw new RuntimeException(e);
+            }
+        }
+
+        @Override
+        public void flush() throws InterruptedException {
+            spoutInQ.flush();
+        }
+    }
+}
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/AckingProducer.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/AckingProducer.java
new file mode 100644
index 00000000000..0214515d4c2
--- /dev/null
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/AckingProducer.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package org.apache.storm.perf.queuetest;
+
+import org.apache.storm.utils.JCQueue;
+
+/**
+ * Simulates a spout: drains acks from the spout's input queue and publishes events to the acker's input queue.
+ */
+class AckingProducer extends MyThread {
+    private final JCQueue ackerInQ;
+    private final JCQueue spoutInQ;
+
+    AckingProducer(JCQueue ackerInQ, JCQueue spoutInQ) {
+        super("AckingProducer");
+        this.ackerInQ = ackerInQ;
+        this.spoutInQ = spoutInQ;
+    }
+
+    @Override
+    public void run() {
+        try {
+            Handler handler = new Handler();
+            long start = System.currentTimeMillis();
+            while (!Thread.interrupted()) {
+                spoutInQ.consume(handler);
+                ackerInQ.publish(++count);
+            }
+            runTime = System.currentTimeMillis() - start;
+        } catch (InterruptedException e) {
+            return;
+        }
+    }
+
+    private class Handler implements JCQueue.Consumer {
+        @Override
+        public void accept(Object event) {
+            // no-op
+        }
+
+        @Override
+        public void flush() {
+            // no-op
+        }
+    }
+}
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Consumer.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Consumer.java
new file mode 100644
index 00000000000..eadb51d8b5e
--- /dev/null
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Consumer.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.queuetest; + +import java.util.concurrent.locks.LockSupport; +import org.apache.storm.utils.JCQueue; +import org.apache.storm.utils.MutableLong; + +class Consumer extends MyThread { + public final MutableLong counter = new MutableLong(0); + private final JCQueue queue; + + Consumer(JCQueue queue) { + super("Consumer"); + this.queue = queue; + } + + @Override + public void run() { + Handler handler = new Handler(); + long start = System.currentTimeMillis(); + while (!Thread.interrupted()) { + int x = queue.consume(handler); + if (x == 0) { + LockSupport.parkNanos(1); + } + } + runTime = System.currentTimeMillis() - start; + } + + @Override + public long getCount() { + return counter.get(); + } + + private class Handler implements JCQueue.Consumer { + @Override + public void accept(Object event) { + counter.increment(); + } + + @Override + public void flush() { + // no-op + } + } +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Forwarder.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Forwarder.java new file mode 100644 index 00000000000..a5781d08554 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Forwarder.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package org.apache.storm.perf.queuetest;
+
+import java.util.concurrent.locks.LockSupport;
+import org.apache.storm.utils.JCQueue;
+import org.apache.storm.utils.MutableLong;
+
+class Forwarder extends MyThread {
+    public final MutableLong counter = new MutableLong(0);
+    private final JCQueue inq;
+    private final JCQueue outq;
+
+    Forwarder(JCQueue inq, JCQueue outq) {
+        super("Forwarder");
+        this.inq = inq;
+        this.outq = outq;
+    }
+
+    @Override
+    public void run() {
+        Handler handler = new Handler();
+        long start = System.currentTimeMillis();
+        while (!Thread.interrupted()) {
+            int x = inq.consume(handler);
+            if (x == 0) {
+                LockSupport.parkNanos(1);
+            }
+        }
+        runTime = System.currentTimeMillis() - start;
+    }
+
+    @Override
+    public long getCount() {
+        return counter.get();
+    }
+
+    private class Handler implements JCQueue.Consumer {
+        @Override
+        public void accept(Object event) {
+            try {
+                outq.publish(event);
+                counter.increment();
+            } catch (InterruptedException e) {
+                throw new RuntimeException(e);
+            }
+        }
+
+        @Override
+        public void flush() {
+            // no-op
+        }
+    }
+}
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/JCQueuePerfTest.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/JCQueuePerfTest.java
new file mode 100644
index 00000000000..2cbee19e340
--- /dev/null
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/JCQueuePerfTest.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package org.apache.storm.perf.queuetest;
+
+import java.util.Collections;
+import org.apache.storm.metrics2.StormMetricRegistry;
+import org.apache.storm.policy.WaitStrategyPark;
+import org.apache.storm.utils.JCQueue;
+
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
+public class JCQueuePerfTest {
+    // Usage: Let it run and then terminate it explicitly (e.g. with Ctrl-C).
+    // Metrics will be printed when the application is terminated.
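+    // To run a measurement, uncomment exactly one of the calls at the top of main() below
+    // (the int argument, where present, is the producer batch size), rebuild, and launch this class.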
+    public static void main(String[] args) throws Exception {
+        // oneProducer1Consumer(1000);   // -- measurement 1
+        // twoProducer1Consumer(1000);   // -- measurement 2
+        // threeProducer1Consumer(1);    // -- measurement 3
+
+        // oneProducer2Consumers(1000);  // -- measurement 4
+
+        // producerFwdConsumer(1000);    // -- measurement 5
+
+        // ackingProducerSimulation();   // -- measurement 6
+
+        while (true) {
+            Thread.sleep(1000);
+        }
+
+    }
+
+    private static void ackingProducerSimulation() {
+        WaitStrategyPark ws = new WaitStrategyPark(100);
+        StormMetricRegistry registry = new StormMetricRegistry();
+        JCQueue spoutQ = new JCQueue("spoutQ", "spoutQ", 1024, 0, 100, ws, "test", "test", Collections.singletonList(1000), 1000, registry);
+        JCQueue ackQ = new JCQueue("ackQ", "ackQ", 1024, 0, 100, ws, "test", "test", Collections.singletonList(1000), 1000, registry);
+
+        // Wire the loop so the producer feeds the acker's input queue and the acker feeds the spout's
+        // input queue (the constructor parameter order is (ackerInQ, spoutInQ)).
+        final AckingProducer ackingProducer = new AckingProducer(ackQ, spoutQ);
+        final Acker acker = new Acker(ackQ, spoutQ);
+
+        runAllThds(ackingProducer, acker);
+    }
+
+    private static void producerFwdConsumer(int prodBatchSz) {
+        WaitStrategyPark ws = new WaitStrategyPark(100);
+        StormMetricRegistry registry = new StormMetricRegistry();
+        JCQueue q1 = new JCQueue("q1", "q1", 1024, 0, prodBatchSz, ws, "test", "test",
+            Collections.singletonList(1000), 1000, registry);
+        JCQueue q2 = new JCQueue("q2", "q2", 1024, 0, prodBatchSz, ws, "test", "test", Collections.singletonList(1000), 1000, registry);
+
+        final Producer prod = new Producer(q1);
+        final Forwarder fwd = new Forwarder(q1, q2);
+        final Consumer cons = new Consumer(q2);
+
+        runAllThds(prod, fwd, cons);
+    }
+
+
+    private static void oneProducer1Consumer(int prodBatchSz) {
+        JCQueue q1 = new JCQueue("q1", "q1", 50_000, 0, prodBatchSz, new WaitStrategyPark(100), "test", "test",
+            Collections.singletonList(1000), 1000, new StormMetricRegistry());
+
+        final Producer prod1 = new Producer(q1);
+        final Consumer cons1 = new Consumer(q1);
+
+        runAllThds(prod1, cons1);
+    }
+
+    private static void twoProducer1Consumer(int prodBatchSz) {
+        JCQueue q1 = new JCQueue("q1", "q1", 50_000, 0, prodBatchSz, new WaitStrategyPark(100), "test", "test",
+            Collections.singletonList(1000), 1000, new StormMetricRegistry());
+
+        final Producer prod1 = new Producer(q1);
+        final Producer prod2 = new Producer(q1);
+        final Consumer cons1 = new Consumer(q1);
+
+        runAllThds(prod1, prod2, cons1);
+    }
+
+    private static void threeProducer1Consumer(int prodBatchSz) {
+        JCQueue q1 = new JCQueue("q1", "q1", 50_000, 0, prodBatchSz, new WaitStrategyPark(100), "test", "test",
+            Collections.singletonList(1000), 1000, new StormMetricRegistry());
+
+        final Producer prod1 = new Producer(q1);
+        final Producer prod2 = new Producer(q1);
+        final Producer prod3 = new Producer(q1);
+        final Consumer cons1 = new Consumer(q1);
+
+        runAllThds(prod1, prod2, prod3, cons1);
+    }
+
+
+    private static void oneProducer2Consumers(int prodBatchSz) {
+        WaitStrategyPark ws = new WaitStrategyPark(100);
+        StormMetricRegistry registry = new StormMetricRegistry();
+        JCQueue q1 = new JCQueue("q1", "q1", 1024, 0, prodBatchSz, ws, "test", "test", Collections.singletonList(1000), 1000, registry);
+        JCQueue q2 = new JCQueue("q2", "q2", 1024, 0, prodBatchSz, ws, "test", "test", Collections.singletonList(1000), 1000, registry);
+
+        final Producer2 prod1 = new Producer2(q1, q2);
+        final Consumer cons1 = new Consumer(q1);
+        final Consumer cons2 = new Consumer(q2);
+
+        runAllThds(prod1, cons1, cons2);
+    }
+
+    public static void runAllThds(MyThread...
threads) {
+        for (Thread thread : threads) {
+            thread.start();
+        }
+        addShutdownHooks(threads);
+    }
+
+    public static void addShutdownHooks(MyThread... threads) {
+
+        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
+            try {
+                System.err.println("Stopping");
+                for (Thread thread : threads) {
+                    thread.interrupt();
+                }
+
+                for (Thread thread : threads) {
+                    System.err.println("Waiting for " + thread.getName());
+                    thread.join();
+                }
+
+                for (MyThread thread : threads) {
+                    System.err.printf("%s : %d, Throughput: %,d \n", thread.getName(), thread.count, thread.throughput());
+                }
+            } catch (InterruptedException e) {
+                return;
+            }
+        }));
+
+    }
+}
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/MyThread.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/MyThread.java
new file mode 100644
index 00000000000..5a5f6fce76e
--- /dev/null
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/MyThread.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package org.apache.storm.perf.queuetest;
+
+abstract class MyThread extends Thread {
+    public long count = 0;
+    public long runTime = 0;
+
+    MyThread(String thdName) {
+        super(thdName);
+    }
+
+    public long throughput() {
+        // Report events/sec; guard against division by zero on sub-second runs,
+        // and multiply before dividing so integer math doesn't truncate to zero.
+        return runTime <= 0 ? 0 : (getCount() * 1000) / runTime;
+    }
+
+    public long getCount() {
+        return count;
+    }
+}
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Producer.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Producer.java
new file mode 100644
index 00000000000..2801819ce0a
--- /dev/null
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Producer.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.queuetest; + +import org.apache.storm.utils.JCQueue; + +class Producer extends MyThread { + private final JCQueue queue; + + Producer(JCQueue queue) { + super("Producer"); + this.queue = queue; + } + + @Override + public void run() { + try { + long start = System.currentTimeMillis(); + while (!Thread.interrupted()) { + queue.publish(++count); + } + runTime = System.currentTimeMillis() - start; + } catch (InterruptedException e) { + return; + } + } + +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Producer2.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Producer2.java new file mode 100644 index 00000000000..c36d88de468 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Producer2.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.queuetest; + +import org.apache.storm.utils.JCQueue; + +/** + * Writes to two queues. + */ +class Producer2 extends MyThread { + private final JCQueue q1; + private final JCQueue q2; + + Producer2(JCQueue q1, JCQueue q2) { + super("Producer2"); + this.q1 = q1; + this.q2 = q2; + } + + @Override + public void run() { + try { + long start = System.currentTimeMillis(); + while (!Thread.interrupted()) { + q1.publish(++count); + q2.publish(count); + } + runTime = System.currentTimeMillis() - start; + } catch (InterruptedException e) { + return; + } + + } +} \ No newline at end of file diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/spout/ConstSpout.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/spout/ConstSpout.java new file mode 100755 index 00000000000..656f992eff0 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/spout/ConstSpout.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.spout; + +import java.util.ArrayList; +import java.util.Map; +import org.apache.storm.spout.SpoutOutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichSpout; +import org.apache.storm.tuple.Fields; +import org.apache.storm.utils.ObjectReader; + +public class ConstSpout extends BaseRichSpout { + + private static final String DEFAULT_FIELD_NAME = "str"; + private String value; + private String fieldName = DEFAULT_FIELD_NAME; + private SpoutOutputCollector collector = null; + private int count = 0; + private Long sleep = 0L; + private int ackCount = 0; + + public ConstSpout(String value) { + this.value = value; + } + + public ConstSpout withOutputFields(String fieldName) { + this.fieldName = fieldName; + return this; + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields(fieldName)); + } + + @Override + public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) { + this.collector = collector; + this.sleep = ObjectReader.getLong(conf.get("spout.sleep"), 0L); + } + + @Override + public void nextTuple() { + ArrayList<Object> tuple = new ArrayList<>(1); + tuple.add(value); + collector.emit(tuple, count++); + try { + if (sleep > 0) { + Thread.sleep(sleep); + } + } catch (InterruptedException e) { + return; + } + } + + @Override + public void ack(Object msgId) { + ++ackCount; + super.ack(msgId); + } + +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/spout/FileReadSpout.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/spout/FileReadSpout.java new file mode 100644 index 00000000000..a819fa25e18 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/spout/FileReadSpout.java @@ -0,0 +1,140 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
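For orientation, ConstSpout above can be smoke-tested locally with a spout-only topology; a sketch under stated assumptions (topology and component names are made up; the "spout.sleep" key is the one ConstSpout.open() reads):

```java
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.perf.spout.ConstSpout;
import org.apache.storm.topology.TopologyBuilder;

public class ConstSpoutSmokeTest {
    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        // emit the same string over and over; "str" is the declared output field
        builder.setSpout("constSpout", new ConstSpout("hello").withOutputFields("str"), 1);

        Config conf = new Config();
        conf.put("spout.sleep", 1L); // ms to sleep between emits, read in open()

        try (LocalCluster cluster = new LocalCluster()) {
            cluster.submitTopology("const-spout-smoke", conf, builder.createTopology());
            Thread.sleep(10_000); // let it emit for a bit, then tear down
        }
    }
}
```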
+ * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.spout; + +import java.io.BufferedReader; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import org.apache.storm.spout.SpoutOutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichSpout; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Values; + +public class FileReadSpout extends BaseRichSpout { + public static final String FIELDS = "sentence"; + private static final long serialVersionUID = -2582705611472467172L; + private transient FileReader reader; + private String file; + private boolean ackEnabled = true; + private SpoutOutputCollector collector; + + private long count = 0; + + + public FileReadSpout(String file) { + this.file = file; + } + + // For testing + FileReadSpout(FileReader reader) { + this.reader = reader; + } + + public static List<String> readLines(InputStream input) { + List<String> lines = new ArrayList<>(); + try { + BufferedReader reader = new BufferedReader(new InputStreamReader(input)); + try { + String line; + while ((line = reader.readLine()) != null) { + lines.add(line); + } + } catch (IOException e) { + throw new RuntimeException("Reading file failed", e); + } finally { + reader.close(); + } + } catch (IOException e) { + throw new RuntimeException("Error closing reader", e); + } + return lines; + } + + @Override + public void open(Map<String, Object> conf, TopologyContext context, + SpoutOutputCollector collector) { + this.collector = collector; + Object ackObj = conf.get("topology.acker.executors"); + if (ackObj != null && ackObj.equals(0)) { + this.ackEnabled = false; + } + // for tests, reader will not be null + if (this.reader == null) { + this.reader = new FileReader(this.file); + } + } + + @Override + public void nextTuple() { + if (ackEnabled) { + collector.emit(new Values(reader.nextLine()), count); + count++; + } else { + collector.emit(new Values(reader.nextLine())); + } + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields(FIELDS)); + } + + public static class FileReader implements Serializable { + + private static final long serialVersionUID = -7012334600647556267L; + + public final String file; + private List<String> contents = null; + private int index = 0; + private int limit = 0; + + public FileReader(String file) { + this.file = file; + if (this.file != null) { + try { + this.contents = readLines(new FileInputStream(this.file)); + } catch (IOException e) { + throw new IllegalArgumentException("Cannot open file " + file, e); + } + this.limit = contents.size(); + } else { + throw new IllegalArgumentException("file name cannot be null"); + } + } + + public String nextLine() { + if (index >= limit) { + index = 0; + } + String line = contents.get(index); + index++; + return line; + } + + } +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/spout/StringGenSpout.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/spout/StringGenSpout.java new file mode 100755 index 00000000000..01964ea5966 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/spout/StringGenSpout.java @@ -0,0 +1,91 @@ +/* + * Licensed 
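A side note on FileReadSpout.readLines above: the nested try/finally predates try-with-resources, and a failure while reading inside the outer try is mislabeled "Error closing reader". A sketch of an equivalent, more idiomatic form (assumption: keeping the same RuntimeException-on-IOException contract):

```java
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.List;

final class ReadLinesSketch {
    // Hypothetical rewrite of FileReadSpout.readLines(); same contract, less nesting.
    static List<String> readLines(InputStream input) {
        List<String> lines = new ArrayList<>();
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(input))) {
            String line;
            while ((line = reader.readLine()) != null) {
                lines.add(line);
            }
        } catch (IOException e) {
            throw new RuntimeException("Reading file failed", e);
        }
        return lines;
    }
}
```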
to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.spout; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import org.apache.commons.lang.RandomStringUtils; +import org.apache.storm.spout.SpoutOutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichSpout; +import org.apache.storm.tuple.Fields; + +/** + * Spout pre-computes a list with 30k fixed length random strings. Emits sequentially from this list, over and over again. + */ + +public class StringGenSpout extends BaseRichSpout { + + private static final String DEFAULT_FIELD_NAME = "str"; + private final int strCount = 30_000; + ArrayList<String> records; + private int strLen; + private String fieldName = DEFAULT_FIELD_NAME; + private SpoutOutputCollector collector = null; + private int curr = 0; + private int count = 0; + + public StringGenSpout(int strLen) { + this.strLen = strLen; + } + + private static ArrayList<String> genStringList(int strLen, int count) { + ArrayList<String> result = new ArrayList<>(count); + for (int i = 0; i < count; i++) { + result.add(RandomStringUtils.random(strLen)); + } + return result; + } + + public StringGenSpout withFieldName(String fieldName) { + this.fieldName = fieldName; + return this; + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields(fieldName)); + } + + @Override + public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) { + this.records = genStringList(strLen, strCount); + + this.collector = collector; + } + + @Override + public void nextTuple() { + // wrap around so the spout keeps emitting indefinitely, as documented + // (mirrors FileReadSpout.FileReader.nextLine() and WordGenSpout.nextTuple()) + if (curr >= strCount) { + curr = 0; + } + List<Object> tuple = Collections.singletonList((Object) records.get(curr)); + ++curr; + collector.emit(tuple, ++count); + } + + + @Override + public void ack(Object msgId) { + super.ack(msgId); + } +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/spout/WordGenSpout.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/spout/WordGenSpout.java new file mode 100644 index 00000000000..a6a1fc9b391 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/spout/WordGenSpout.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.spout; + +import java.io.BufferedReader; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.ArrayList; +import java.util.Map; +import org.apache.storm.perf.ThroughputMeter; +import org.apache.storm.perf.utils.Helper; +import org.apache.storm.spout.SpoutOutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichSpout; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Values; + +public class WordGenSpout extends BaseRichSpout { + public static final String FIELDS = "word"; + private static final long serialVersionUID = -2582705611472467172L; + private String file; + private boolean ackEnabled = true; + private SpoutOutputCollector collector; + + private long count = 0; + private int index = 0; + private ThroughputMeter emitMeter; + private ArrayList<String> words; + + + public WordGenSpout(String file) { + this.file = file; + } + + /** + * Reads text file and extracts words from each line. + * + * @return a list of all (non-unique) words + */ + public static ArrayList<String> readWords(String file) { + ArrayList<String> words = new ArrayList<>(); + try { + FileInputStream input = new FileInputStream(file); + BufferedReader reader = new BufferedReader(new InputStreamReader(input)); + try { + String line; + while ((line = reader.readLine()) != null) { + for (String word : line.split("\\s+")) { + words.add(word); + } + } + } catch (IOException e) { + throw new RuntimeException("Reading file failed", e); + } finally { + reader.close(); + } + } catch (IOException e) { + throw new RuntimeException("Error closing reader", e); + } + return words; + } + + @Override + public void open(Map<String, Object> conf, + TopologyContext context, + SpoutOutputCollector collector) { + this.collector = collector; + Integer ackers = Helper.getInt(conf, "topology.acker.executors", 0); + if (ackers.equals(0)) { + this.ackEnabled = false; + } + // load the full word list into memory once + words = readWords(file); + emitMeter = new ThroughputMeter("WordGenSpout emits"); + } + + @Override + public void nextTuple() { + index = (index < words.size() - 1) ? index + 1 : 0; + String word = words.get(index); + if (ackEnabled) { + collector.emit(new Values(word), count); + count++; + } else { + collector.emit(new Values(word)); + } + emitMeter.record(); + + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields(FIELDS)); + } + +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Cons.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Cons.java new file mode 100644 index 00000000000..257e9e77217 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Cons.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
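Both FileReadSpout and WordGenSpout above decide whether to anchor emits by sniffing topology.acker.executors from the topology config: a msgId is attached only when ackers exist, since anchored emits cost an extra ack message per tuple. Note the two spouts default differently when the key is absent: FileReadSpout keeps acking enabled unless it sees an explicit 0, while WordGenSpout's Helper.getInt default of 0 disables it. Illustratively (a hypothetical driver, not from the patch):

```java
import org.apache.storm.Config;

public class AckToggleExample {
    public static void main(String[] args) {
        Config conf = new Config();
        // 0 ackers: WordGenSpout.open() sets ackEnabled = false and emits unanchored
        conf.put(Config.TOPOLOGY_ACKER_EXECUTORS, 0);
        // any positive count re-enables anchored emits (and complete-latency tracking):
        // conf.put(Config.TOPOLOGY_ACKER_EXECUTORS, 1);
        System.out.println(conf);
    }
}
```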
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.toolstest; + +import java.util.concurrent.locks.LockSupport; +import org.apache.storm.utils.MutableLong; +import org.jctools.queues.MpscArrayQueue; + +class Cons extends MyThd { + public final MutableLong counter = new MutableLong(0); + private final MpscArrayQueue<Object> queue; + + Cons(MpscArrayQueue<Object> queue) { + super("Consumer"); + this.queue = queue; + } + + @Override + public void run() { + Handler handler = new Handler(); + long start = System.currentTimeMillis(); + + while (!halt) { + // Handler.accept() counts each drained event, so no extra per-batch increment here + int x = queue.drain(handler); + if (x == 0) { + LockSupport.parkNanos(1); + } + } + runTime = System.currentTimeMillis() - start; + } + + @Override + public long getCount() { + return counter.get(); + } + + private class Handler implements org.jctools.queues.MessagePassingQueue.Consumer<Object> { + @Override + public void accept(Object event) { + counter.increment(); + } + } +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/JCToolsPerfTest.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/JCToolsPerfTest.java new file mode 100644 index 00000000000..b5afe28b6a6 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/JCToolsPerfTest.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.toolstest; + +import org.jctools.queues.MpscArrayQueue; + +@SuppressWarnings("checkstyle:AbbreviationAsWordInName") +public class JCToolsPerfTest { + public static void main(String[] args) throws Exception { + // oneProducer1Consumer(); + // twoProducer1Consumer(); + // threeProducer1Consumer(); + // oneProducer2Consumers(); + // producerFwdConsumer(); + + // JCQueue spoutQ = new JCQueue("spoutQ", 1024, 100, 0); + // JCQueue ackQ = new JCQueue("ackQ", 1024, 100, 0); + // + // final AckingProducer ackingProducer = new AckingProducer(spoutQ, ackQ); + // final Acker acker = new Acker(ackQ, spoutQ); + // + // runAllThds(ackingProducer, acker); + + while (true) { + Thread.sleep(1000); + } + + } + + private static void oneProducer1Consumer() { + MpscArrayQueue q1 = new MpscArrayQueue(50_000); + + final Prod prod1 = new Prod(q1); + final Cons cons1 = new Cons(q1); + + runAllThds(prod1, cons1); + } + + private static void twoProducer1Consumer() { + MpscArrayQueue q1 = new MpscArrayQueue(50_000); + + final Prod prod1 = new Prod(q1); + final Prod prod2 = new Prod(q1); + final Cons cons1 = new Cons(q1); + + runAllThds(prod1, cons1, prod2); + } + + private static void threeProducer1Consumer() { + MpscArrayQueue q1 = new MpscArrayQueue(50_000); + + final Prod prod1 = new Prod(q1); + final Prod prod2 = new Prod(q1); + final Prod prod3 = new Prod(q1); + final Cons cons1 = new Cons(q1); + + runAllThds(prod1, prod2, prod3, cons1); + } + + + private static void oneProducer2Consumers() { + MpscArrayQueue q1 = new MpscArrayQueue(50_000); + MpscArrayQueue q2 = new MpscArrayQueue(50_000); + + final Prod2 prod1 = new Prod2(q1, q2); + final Cons cons1 = new Cons(q1); + final Cons cons2 = new Cons(q2); + + runAllThds(prod1, cons1, cons2); + } + + public static void runAllThds(MyThd... threads) { + for (Thread thread : threads) { + thread.start(); + } + addShutdownHooks(threads); + } + + public static void addShutdownHooks(MyThd... threads) { + + Runtime.getRuntime().addShutdownHook(new Thread(() -> { + try { + System.err.println("Stopping"); + for (MyThd thread : threads) { + thread.halt = true; + } + + for (Thread thread : threads) { + System.err.println("Waiting for " + thread.getName()); + thread.join(); + } + + for (MyThd thread : threads) { + System.err.printf("%s : %d, Throughput: %,d \n", thread.getName(), thread.count, thread.throughput()); + } + } catch (InterruptedException e) { + return; + } + })); + + } + +} + + + diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/MyThd.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/MyThd.java new file mode 100644 index 00000000000..0922c23fc66 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/MyThd.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
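The harness above exercises JCTools' MpscArrayQueue directly: producers offer(), the single consumer drain()s batches and parks for 1 ns when the queue is momentarily empty. A self-contained sketch of that pattern (sizes, counts, and class name are arbitrary, not from the patch):

```java
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.LockSupport;
import org.jctools.queues.MpscArrayQueue;

public class MpscDrainDemo {
    public static void main(String[] args) throws InterruptedException {
        MpscArrayQueue<Long> queue = new MpscArrayQueue<>(50_000);
        AtomicLong consumed = new AtomicLong();

        Thread producer = new Thread(() -> {
            for (long i = 0; i < 1_000_000; i++) {
                while (!queue.offer(i)) {
                    LockSupport.parkNanos(1); // queue full: back off until consumer catches up
                }
            }
        });

        Thread consumer = new Thread(() -> {
            while (consumed.get() < 1_000_000) {
                int drained = queue.drain(e -> consumed.incrementAndGet());
                if (drained == 0) {
                    LockSupport.parkNanos(1); // same wait strategy as Cons above
                }
            }
        });

        producer.start();
        consumer.start();
        producer.join();
        consumer.join();
        System.out.println("consumed = " + consumed.get());
    }
}
```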
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.toolstest; + +abstract class MyThd extends Thread { + public long count = 0; + public long runTime = 0; + public boolean halt = false; + + MyThd(String thdName) { + super(thdName); + } + + public long throughput() { + return getCount() / (runTime / 1000); + } + + public long getCount() { + return count; + } +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Prod.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Prod.java new file mode 100644 index 00000000000..cfb1bf2ff0f --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Prod.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.toolstest; + +import org.jctools.queues.MpscArrayQueue; + +class Prod extends MyThd { + private final MpscArrayQueue queue; + + Prod(MpscArrayQueue queue) { + super("Producer"); + this.queue = queue; + } + + @Override + public void run() { + long start = System.currentTimeMillis(); + + while (!halt) { + ++count; + while (!queue.offer(count)) { + if (interrupted()) { + return; + } + } + } + runTime = System.currentTimeMillis() - start; + } + +} \ No newline at end of file diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Prod2.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Prod2.java new file mode 100644 index 00000000000..e0eea199f57 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Prod2.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.toolstest; + +import org.jctools.queues.MpscArrayQueue; + +/** + * Writes to two queues. + */ +class Prod2 extends MyThd { + private final MpscArrayQueue<Object> q1; + private final MpscArrayQueue<Object> q2; + + Prod2(MpscArrayQueue<Object> q1, MpscArrayQueue<Object> q2) { + super("Producer2"); + this.q1 = q1; + this.q2 = q2; + } + + @Override + public void run() { + long start = System.currentTimeMillis(); + + while (!halt) { + q1.offer(++count); + q2.offer(count); + } + runTime = System.currentTimeMillis() - start; + } +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/utils/BasicMetricsCollector.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/utils/BasicMetricsCollector.java new file mode 100755 index 00000000000..735abe01a86 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/utils/BasicMetricsCollector.java @@ -0,0 +1,269 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.utils; + +import java.io.PrintWriter; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import org.apache.log4j.Logger; +import org.apache.storm.generated.Nimbus; +import org.apache.storm.utils.Utils; + +public class BasicMetricsCollector implements AutoCloseable { + /* headers */ + public static final String TIME = "elapsed (sec)"; + public static final String TIME_FORMAT = "%d"; + public static final String TOTAL_SLOTS = "total_slots"; + public static final String USED_SLOTS = "used_slots"; + public static final String WORKERS = "workers"; + public static final String TASKS = "tasks"; + public static final String EXECUTORS = "executors"; + public static final String TRANSFERRED = "transferred (messages)"; + public static final String XSFER_RATE = "transfer rate (messages/s)"; + public static final String SPOUT_EXECUTORS = "spout_executors"; + public static final String SPOUT_TRANSFERRED = "spout_transferred (messages)"; + public static final String SPOUT_ACKED = "spout_acks"; + public static final String SPOUT_THROUGHPUT = "spout_throughput (acks/s)"; + public static final String SPOUT_AVG_COMPLETE_LATENCY = "spout_avg_complete_latency(ms)"; + public static final String SPOUT_AVG_LATENCY_FORMAT = "%.3f"; + public static final String SPOUT_MAX_COMPLETE_LATENCY = "spout_max_complete_latency(ms)"; + public static final String SPOUT_MAX_LATENCY_FORMAT = "%.3f"; + private static final Logger LOG = Logger.getLogger(BasicMetricsCollector.class); + final MetricsCollectorConfig config; + // final StormTopology topology; + final Set<String> 
header = new LinkedHashSet<>(); + final Map<String, String> metrics = new HashMap<>(); + final boolean collectTopologyStats; + final boolean collectExecutorStats; + final boolean collectThroughput; + final boolean collectSpoutThroughput; + final boolean collectSpoutLatency; + int lineNumber = 0; + boolean first = true; + private PrintWriter dataWriter; + private long startTime = 0; + private MetricsSample lastSample; + private MetricsSample curSample; + private double maxLatency = 0; + + public BasicMetricsCollector(String topoName, Map<String, Object> topoConfig) { + Set<MetricsItem> items = getMetricsToCollect(); + this.config = new MetricsCollectorConfig(topoName, topoConfig); + collectTopologyStats = collectTopologyStats(items); + collectExecutorStats = collectExecutorStats(items); + collectThroughput = collectThroughput(items); + collectSpoutThroughput = collectSpoutThroughput(items); + collectSpoutLatency = collectSpoutLatency(items); + dataWriter = new PrintWriter(System.err); + } + + private Set<MetricsItem> getMetricsToCollect() { + Set<MetricsItem> result = new HashSet<>(); + result.add(MetricsItem.ALL); + return result; + } + + public void collect(Nimbus.Iface client) { + try { + if (!first) { + this.lastSample = this.curSample; + this.curSample = MetricsSample.factory(client, config.name); + updateStats(dataWriter); + writeLine(dataWriter); + } else { + LOG.info("Getting baseline metrics sample."); + writeHeader(dataWriter); + this.curSample = MetricsSample.factory(client, config.name); + first = false; + startTime = System.currentTimeMillis(); + } + } catch (Exception e) { + LOG.error("storm metrics failed! ", e); + } + } + + @Override + public void close() { + dataWriter.close(); + } + + boolean updateStats(PrintWriter writer) + throws Exception { + if (collectTopologyStats) { + updateTopologyStats(); + } + if (collectExecutorStats) { + updateExecutorStats(); + } + return true; + } + + void updateTopologyStats() { + long timeTotal = System.currentTimeMillis() - startTime; + int numWorkers = this.curSample.getNumWorkers(); + int numExecutors = this.curSample.getNumExecutors(); + int numTasks = this.curSample.getNumTasks(); + metrics.put(TIME, String.format(TIME_FORMAT, timeTotal / 1000)); + metrics.put(WORKERS, Integer.toString(numWorkers)); + metrics.put(EXECUTORS, Integer.toString(numExecutors)); + metrics.put(TASKS, Integer.toString(numTasks)); + } + + void updateExecutorStats() { + long timeDiff = this.curSample.getSampleTime() - this.lastSample.getSampleTime(); + long transferredDiff = this.curSample.getTotalTransferred() - this.lastSample.getTotalTransferred(); + long throughput = transferredDiff / (timeDiff / 1000); + + long spoutDiff = this.curSample.getSpoutTransferred() - this.lastSample.getSpoutTransferred(); + long spoutAckedDiff = this.curSample.getTotalAcked() - this.lastSample.getTotalAcked(); + long spoutThroughput = spoutDiff / (timeDiff / 1000); + + if (collectThroughput) { + metrics.put(TRANSFERRED, Long.toString(transferredDiff)); + metrics.put(XSFER_RATE, Long.toString(throughput)); + } + + if (collectSpoutThroughput) { + + metrics.put(SPOUT_EXECUTORS, Integer.toString(this.curSample.getSpoutExecutors())); + metrics.put(SPOUT_TRANSFERRED, Long.toString(spoutDiff)); + metrics.put(SPOUT_ACKED, Long.toString(spoutAckedDiff)); + metrics.put(SPOUT_THROUGHPUT, Long.toString(spoutThroughput)); + } + + + if (collectSpoutLatency) { + double latency = this.curSample.getTotalLatency(); + if (latency > this.maxLatency) { + this.maxLatency = latency; + } + metrics.put(SPOUT_AVG_COMPLETE_LATENCY, + String.format(SPOUT_AVG_LATENCY_FORMAT, 
latency)); + metrics.put(SPOUT_MAX_COMPLETE_LATENCY, + String.format(SPOUT_MAX_LATENCY_FORMAT, this.maxLatency)); + + } + } + + void writeHeader(PrintWriter writer) { + header.add(TIME); + if (collectTopologyStats) { + header.add(WORKERS); + header.add(TASKS); + header.add(EXECUTORS); + } + + if (collectThroughput) { + header.add(TRANSFERRED); + header.add(XSFER_RATE); + } + + if (collectSpoutThroughput) { + header.add(SPOUT_EXECUTORS); + header.add(SPOUT_TRANSFERRED); + header.add(SPOUT_ACKED); + header.add(SPOUT_THROUGHPUT); + } + + if (collectSpoutLatency) { + header.add(SPOUT_AVG_COMPLETE_LATENCY); + header.add(SPOUT_MAX_COMPLETE_LATENCY); + } + + writer.println( + "\n------------------------------------------------------------------------------------------------------------------"); + String str = Utils.join(header, ","); + writer.println(str); + writer + .println("------------------------------------------------------------------------------------------------------------------"); + writer.flush(); + } + + void writeLine(PrintWriter writer) { + List line = new LinkedList(); + for (String h : header) { + line.add(metrics.get(h)); + } + String str = Utils.join(line, ","); + writer.println(str); + writer.flush(); + } + + boolean collectTopologyStats(Set items) { + return items.contains(MetricsItem.ALL) + || items.contains(MetricsItem.TOPOLOGY_STATS); + } + + boolean collectExecutorStats(Set items) { + return items.contains(MetricsItem.ALL) + || items.contains(MetricsItem.XSFER_RATE) + || items.contains(MetricsItem.SPOUT_LATENCY); + } + + boolean collectThroughput(Set items) { + return items.contains(MetricsItem.ALL) + || items.contains(MetricsItem.XSFER_RATE); + } + + boolean collectSpoutThroughput(Set items) { + return items.contains(MetricsItem.ALL) + || items.contains(MetricsItem.SPOUT_THROUGHPUT); + } + + boolean collectSpoutLatency(Set items) { + return items.contains(MetricsItem.ALL) + || items.contains(MetricsItem.SPOUT_LATENCY); + } + + public enum MetricsItem { + TOPOLOGY_STATS, + XSFER_RATE, + SPOUT_THROUGHPUT, + SPOUT_LATENCY, + ALL + } + + public static class MetricsCollectorConfig { + private static final Logger LOG = Logger.getLogger(MetricsCollectorConfig.class); + + // storm configuration + public final Map topoConfig; + // storm topology name + public final String name; + // benchmark label + public final String label; + + public MetricsCollectorConfig(String topoName, Map topoConfig) { + this.topoConfig = topoConfig; + String labelStr = (String) topoConfig.get("benchmark.label"); + this.name = topoName; + if (labelStr == null) { + LOG.warn("'benchmark.label' not found in config. Defaulting to topology name"); + labelStr = this.name; + } + this.label = labelStr; + } + } // MetricsCollectorConfig + +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/utils/Helper.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/utils/Helper.java new file mode 100755 index 00000000000..a573308b607 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/utils/Helper.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.utils; + +import java.util.Map; +import org.apache.storm.StormSubmitter; +import org.apache.storm.generated.KillOptions; +import org.apache.storm.generated.Nimbus; +import org.apache.storm.generated.StormTopology; +import org.apache.storm.utils.NimbusClient; +import org.apache.storm.utils.ObjectReader; +import org.apache.storm.utils.Utils; + + +public class Helper { + + public static void kill(Nimbus.Iface client, String topoName) throws Exception { + KillOptions opts = new KillOptions(); + opts.set_wait_secs(0); + client.killTopologyWithOpts(topoName, opts); + } + + public static int getInt(Map map, Object key, int def) { + return ObjectReader.getInt(Utils.get(map, key, def)); + } + + public static String getStr(Map map, Object key) { + return (String) map.get(key); + } + + public static void collectMetricsAndKill(String topologyName, Integer pollInterval, int duration) throws Exception { + Map clusterConf = Utils.readStormConfig(); + Nimbus.Iface client = NimbusClient.Builder.withConf(clusterConf).build().getClient(); + try (BasicMetricsCollector metricsCollector = new BasicMetricsCollector(topologyName, clusterConf)) { + + if (duration > 0) { + int times = duration / pollInterval; + metricsCollector.collect(client); + for (int i = 0; i < times; i++) { + Thread.sleep(pollInterval * 1000); + metricsCollector.collect(client); + } + } else { + while (true) { //until Ctrl-C + metricsCollector.collect(client); + Thread.sleep(pollInterval * 1000); + } + } + } finally { + kill(client, topologyName); + } + } + + /** + * Kill topo on Ctrl-C. + */ + public static void setupShutdownHook(final String topoName) { + Map clusterConf = Utils.readStormConfig(); + final Nimbus.Iface client = NimbusClient.Builder.withConf(clusterConf).build().getClient(); + Runtime.getRuntime().addShutdownHook(new Thread() { + @Override + public void run() { + try { + System.out.println("Killing..."); + Helper.kill(client, topoName); + System.out.println("Killed Topology"); + } catch (Exception e) { + e.printStackTrace(); + } + } + }); + } + + public static void runOnClusterAndPrintMetrics(int durationSec, String topoName, Map topoConf, StormTopology topology) + throws Exception { + // submit topology + StormSubmitter.submitTopologyWithProgressBar(topoName, topoConf, topology); + setupShutdownHook(topoName); // handle Ctrl-C + + // poll metrics every minute, then kill topology after specified duration + Integer pollIntervalSec = 60; + collectMetricsAndKill(topoName, pollIntervalSec, durationSec); + } +} diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/utils/IdentityBolt.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/utils/IdentityBolt.java new file mode 100755 index 00000000000..df27263b150 --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/utils/IdentityBolt.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
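Helper.runOnClusterAndPrintMetrics above is the intended entry point for the perf topologies: submit with a progress bar, hook Ctrl-C to kill the topology, poll BasicMetricsCollector once a minute, then kill after the requested duration. A typical caller might look like the following sketch (topology and component names are illustrative, not from the patch):

```java
import org.apache.storm.Config;
import org.apache.storm.perf.spout.ConstSpout;
import org.apache.storm.perf.utils.Helper;
import org.apache.storm.topology.TopologyBuilder;

public class ConstSpoutPerfRun {
    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("constSpout", new ConstSpout("some data").withOutputFields("str"), 1);

        Config conf = new Config();
        conf.setNumWorkers(1);

        // submits the topology, prints a metrics line every 60s, kills it after 300s
        Helper.runOnClusterAndPrintMetrics(300, "ConstSpoutPerfRun", conf, builder.createTopology());
    }
}
```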
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.utils; + +import java.util.Map; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichBolt; +import org.apache.storm.tuple.Tuple; + +public class IdentityBolt extends BaseRichBolt { + private OutputCollector collector; + + @Override + public void prepare(Map topoConf, TopologyContext context, OutputCollector collector) { + this.collector = collector; + } + + @Override + public void execute(Tuple tuple) { + collector.emit(tuple, tuple.getValues()); + collector.ack(tuple); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + } +} + diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/utils/MetricsSample.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/utils/MetricsSample.java new file mode 100755 index 00000000000..fd9f042d3aa --- /dev/null +++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/utils/MetricsSample.java @@ -0,0 +1,230 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
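One caveat about IdentityBolt above: it anchors its emit to the input tuple and acks manually, but declareOutputFields declares nothing, so Storm will typically reject the emit on the undeclared default stream. A sketch of a fields-aware variant (hypothetical; the class name and constructor are not from the patch):

```java
import java.util.Map;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;

public class FieldsIdentityBolt extends BaseRichBolt {
    private final Fields fields;
    private OutputCollector collector;

    public FieldsIdentityBolt(Fields fields) {
        this.fields = fields; // e.g. new Fields("str"), matching the upstream spout
    }

    @Override
    public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void execute(Tuple tuple) {
        collector.emit(tuple, tuple.getValues()); // anchored pass-through
        collector.ack(tuple);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(fields); // lets downstream bolts subscribe to the pass-through stream
    }
}
```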
+ * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.storm.perf.utils; + +import java.util.List; +import java.util.Map; +import org.apache.storm.generated.ClusterSummary; +import org.apache.storm.generated.ExecutorSpecificStats; +import org.apache.storm.generated.ExecutorStats; +import org.apache.storm.generated.ExecutorSummary; +import org.apache.storm.generated.Nimbus; +import org.apache.storm.generated.SpoutStats; +import org.apache.storm.generated.TopologyInfo; +import org.apache.storm.generated.TopologySummary; +import org.apache.storm.utils.Utils; + +public class MetricsSample { + + private long sampleTime = -1L; + private long totalTransferred = 0L; + private long totalEmitted = 0L; + private long totalAcked = 0L; + private long totalFailed = 0L; + + private double totalLatency; + + private long spoutEmitted = 0L; + private long spoutTransferred = 0L; + private int spoutExecutors = 0; + + private int numSupervisors = 0; + private int numWorkers = 0; + private int numTasks = 0; + private int numExecutors = 0; + + private int totalSlots = 0; + private int usedSlots = 0; + + public static MetricsSample factory(Nimbus.Iface client, String topologyName) throws Exception { + // "************ Sampling Metrics ***************** + + // get topology info + TopologySummary topSummary = client.getTopologySummaryByName(topologyName); + int topologyExecutors = topSummary.get_num_executors(); + int topologyWorkers = topSummary.get_num_workers(); + int topologyTasks = topSummary.get_num_tasks(); + TopologyInfo topInfo = client.getTopologyInfo(topSummary.get_id()); + + MetricsSample sample = getMetricsSample(topInfo); + sample.numWorkers = topologyWorkers; + sample.numExecutors = topologyExecutors; + sample.numTasks = topologyTasks; + return sample; + } + + private static MetricsSample getMetricsSample(TopologyInfo topInfo) { + List<ExecutorSummary> executorSummaries = topInfo.get_executors(); + + // totals + long totalTransferred = 0L; + long totalEmitted = 0L; + long totalAcked = 0L; + long totalFailed = 0L; + + // number of spout executors + int spoutExecCount = 0; + double spoutLatencySum = 0.0; + long spoutTransferred = 0L; + + // Executor summaries + for (ExecutorSummary executorSummary : executorSummaries) { + ExecutorStats executorStats = executorSummary.get_stats(); + if (executorStats == null) { + continue; + } + + ExecutorSpecificStats executorSpecificStats = executorStats.get_specific(); + if (executorSpecificStats == null) { + // bail out + continue; + } + + // transferred totals + Map<String, Map<String, Long>> transferred = executorStats.get_transferred(); + Map<String, Long> txMap = transferred.get(":all-time"); + if (txMap == null) { + continue; + } + for (String key : txMap.keySet()) { + // todo, ignore the master batch coordinator ? 
+ if (!Utils.isSystemId(key)) { + Long count = txMap.get(key); + totalTransferred += count; + if (executorSpecificStats.is_set_spout()) { + spoutTransferred += count; + } + } + } + + // we found a spout + if (executorSpecificStats.is_set_spout()) { + + SpoutStats spoutStats = executorSpecificStats.get_spout(); + Map<String, Long> acked = spoutStats.get_acked().get(":all-time"); + if (acked != null) { + for (String key : acked.keySet()) { + totalAcked += acked.get(key); + } + } + + Map<String, Long> failed = spoutStats.get_failed().get(":all-time"); + if (failed != null) { + for (String key : failed.keySet()) { + totalFailed += failed.get(key); + } + } + + double total = 0d; + Map<String, Double> vals = spoutStats.get_complete_ms_avg().get(":all-time"); + if (vals != null) { + for (String key : vals.keySet()) { + total += vals.get(key); + } + double latency = total / vals.size(); + spoutLatencySum += latency; + } + + spoutExecCount++; + } + + + } // end executor summary + + MetricsSample ret = new MetricsSample(); + ret.totalEmitted = totalEmitted; + ret.totalTransferred = totalTransferred; + ret.totalAcked = totalAcked; + ret.totalFailed = totalFailed; + ret.totalLatency = spoutLatencySum / spoutExecCount; + + long spoutEmitted = 0L; + ret.spoutEmitted = spoutEmitted; + ret.spoutTransferred = spoutTransferred; + ret.sampleTime = System.currentTimeMillis(); + // ret.numSupervisors = clusterSummary.get_supervisors_size(); + ret.numWorkers = 0; + ret.numExecutors = 0; + ret.numTasks = 0; + ret.spoutExecutors = spoutExecCount; + return ret; + } + + // getters + public long getSampleTime() { + return sampleTime; + } + + public long getTotalTransferred() { + return totalTransferred; + } + + public long getTotalEmitted() { + return totalEmitted; + } + + public long getTotalAcked() { + return totalAcked; + } + + public long getTotalFailed() { + return totalFailed; + } + + public double getTotalLatency() { + return totalLatency; + } + + public long getSpoutEmitted() { + return spoutEmitted; + } + + public long getSpoutTransferred() { + return spoutTransferred; + } + + public int getNumSupervisors() { + return numSupervisors; + } + + public int getNumWorkers() { + return numWorkers; + } + + public int getNumTasks() { + return numTasks; + } + + public int getTotalSlots() { + return totalSlots; + } + + public int getSpoutExecutors() { + return this.spoutExecutors; + } + + public int getNumExecutors() { + return this.numExecutors; + } + + public int getUsedSlots() { + return this.usedSlots; + } + +} diff --git a/examples/storm-perf/src/main/sampledata/randomwords.txt b/examples/storm-perf/src/main/sampledata/randomwords.txt new file mode 100644 index 00000000000..1b800cfce43 --- /dev/null +++ b/examples/storm-perf/src/main/sampledata/randomwords.txt @@ -0,0 +1,14049 @@ +Hester starosta Bassaris guanajuatite pinulus debromination +outhue pamphlet Socraticism tomorrowness masa Bertat Vichyite +epauliere magnificently Mormyrus circumzenithal sapphiric Homoiousian misthread +Pithecolobium saguran hemimelus epauliere symbiogenetically +chronist cervisial euphemious topline sandbox pneumonalgia Babylonism sawdust by +figured impressor theologicopolitical scrat arsenide cockal roughcast introducer playfellowship +potentness Tsonecan ovopyriform inertly Bertat acocotl whitlowwort +Gothish Pishquow corona interruptedness glossing molecule preagitate +seelful unscourged swangy Mycogone bogydom nummi homotransplant unburnt +guitarist cockal Coniferae cylindric ovopyriform nonmanufacture monilioid hypochondriacism +underogating impairment genii Ludgatian 
palaeotheriodont marten +euphonym zoonitic cervisial Bertat guanajuatite Vaishnavism nonsuppressed subsequentially +scotale semiangle epididymitis bestill nonutilitarian lifter pyrocatechol obolus +scrat unpremonished ungrave undeterring winterproof zanyism Itea tonsure +antideflation rivethead seeingness snare refasten repealableness monstership bacillite +biodynamics antiadiaphorist strander undangered Hester +overwoven fallacious yawler seelful helminthagogic +quad putative whittle placatory vinny nebular sialadenitis +epauliere elemicin greave relaster pyroacetic nectopod nonexecutive weism dunkadoo +sarcologist tetragynian cubit frictionlessly magnetooptics undangered glyphography vinny +bladderwort unobservantness unswanlike terrestrially botchedly ultraobscure +papery mesymnion embryotic Bulanda pumpkinification Bassaris cromlech ungrave +Semecarpus commotion familist uncontradictableness manny vinny +eurythermal enation oratorship cretaceous hypoid +biopsic yote ineunt subangulated mutter barkometer +manny signifier brag heavenful Homoiousian undeterring michigan +ultratense Protestantize angina zenick swearingly unexplicit +monstership unrepealably shibuichi enhedge pachydermous bozal rechar lineamental +thermochemically nonutilitarian uninterpleaded Florissant ineunt jharal aneurism stachyuraceous +neuromimesis reperuse magnificently scyphostoma manganosiderite +merciful becomma unrealize Vaishnavism superindifference trillium soorkee rechar +lienteria Orbitolina Protestantize Gothish molossic nonsuppressed nonutilitarian +cyanoguanidine feasibleness unstipulated thermoresistant slait Inger characinoid +transudatory seizing ungreat almud monander abstractionism socioromantic +sequestrum sequestrum widdle angina bacillite +tum beatable seraphism bozal cattimandoo skyshine sombreroed emir +circular strander diplomatize veterinarian inertly prolificy prepavement +taver sequestrum corbel classificational mangonism strander swacking +unrevolting Sebastian spiranthic hepatorrhaphy oflete subangulated subtransverse +unharmed myesthesia trophonema bozal immatchable +uninhabitedness eristically Eryon allegedly daytime periclitation phallaceous +phytonic yote Serrifera nonmanufacture Animalivora +bozal gunshop ornithodelphous Tamil chorograph Quakerishly outwealth laryngic hepatorrhaphy +pinulus uninhabitedness endotheliomyoma unachievable Confervales ladhood chilblain +unachievable nonmanufacture rave Prosobranchiata dermorhynchous minniebush oinomancy overinstruct ventricous +precostal ununiformly mastication figured idiotize antihero +Kenipsim beneficent blurredness Dawsonia twinling phallaceous impugnation dermorhynchous +packsack zanyism breadwinner Megaluridae uncarefully zenick dishpan squdge +dialoguer theologicopolitical subfoliar mangonism mustafina champer +Consolamentum exploiter imperceptivity nectopod breadwinner floatability +heavenful theologicopolitical pyrocatechol ferrogoslarite infestation dermorhynchous lampyrine +shallowish crystallographical uninhabitedness cocksuredom yawler hysterolysis genii bromic +antalgol chronographic chasmy proboscidiform craglike stiffish +marshiness Scanic adz uninterpleaded trailmaking stormy impairment Dunlop pinulus +mechanist helminthagogic semantician ununiformly incomprehensible equiconvex adatom transcorporeal sertularian +migrainoid bacillite wandoo sapience counteralliance preparative unpeople aurothiosulphuric +deindividualization astucious rede schoolmasterism boor alveolite bogydom unaccessible tendomucoid +glyphography antihero 
furacious centrifugalization eurythermal atlantite counteractively metaphonical +Hydrangea spermaphyte canicule impugnation meloplasty introducer +lithotresis Effie saccharogenic arsenide porencephalous speckedness +bought patroller uvanite unevoked basto dehairer bought +posterishness overstaid exploiter mesymnion angina oratorize winterproof Ochnaceae iniquitously +jharal gallybeggar marshiness subdentate twinling redecrease ramosopalmate +chronist underskin wandoo diurnalness haply helpless chilblain unforkedness +epididymitis archesporial uncompromisingly mesymnion karyological yawler +ventricous horsefly bespin autobiographist deaf Lemuridae Caphtor misexposition +abscission masa scyphostoma pentafid nonlustrous lebensraum +posttraumatic uninductive sloped transude enation lithograph pachydermous papery +weism proacquittal greave Scorpaenidae arrowworm +velaric doubtingness Swaziland unstressedly coracomandibular +iniquitously suspend relaster dunkadoo Glecoma larklike uncompromisingly unscourged +macropterous reappreciate countergabion immatchable ovopyriform Russifier thiodiazole rizzomed sequacity +neuromimesis balladmonger bladderwort furacious nonpoisonous tetchy pumpkinification laryngic Effie +opacousness cacuminal unleavened lithotresis metaphonical antideflation feasibleness subfebrile +underskin unpatched Gothish stradametrical yeat +Dawsonia counteractively merciful doubtingness Spencerism havoc involatile precostal +Mormyrus calabazilla tomorn valvula glandularly supraoesophageal ungrave +overstaid throbless lampyrine phallaceous experientialist blurredness okonite +subdrainage periarthritis unrepealably enation pansophism +helminthagogic digitule quarried metastoma molecule Uraniidae harr mediateness slipped +Aktistetae diminutively cresylite antihero spiciferous ascitic +dehairer mangonism Lincolnlike prescriber templar +Ophiosaurus plerome tantivy diplomatize squdge venialness +imprescribable angiolymphoma seeingness exprobratory Glecoma mechanist avengeful diopside uncontradictableness +decidable parodist unexplicit physiologian Munychian parabolicness propheticism +homotransplant reperuse antiabolitionist sialadenitis plerome Dunlop uncontradictableness +pachydermous preoral chargeably sterilely aneurism cubby yeelaman +migrainoid totaquina benthonic centrifugalization pyroacetic nonutilitarian corona +interruptor diatomaceous oxyterpene beneficent returnability molecule +tantivy comism misthread cubby cretaceous ungrave +eristically waird overcontribute totaquina proboscidiform tramplike +counteralliance ungreat unchatteled bucketer bespin parastas defensibly chasmy +calabazilla cubit synovial bacterioblast neurodegenerative apopenptic asparaginic +inductivity undinted thermochemically antiabolitionist pendulant +pachydermous tum Pishquow inertly ventricous +manganosiderite skyshine Bishareen tailoress transcortical homeotypical glacierist +devilwise enhedge pendulant cervisial preagitate yote eucalypteol cornberry clanned +subfoliar impairment bromic Eryon centrifugalization mericarp folious +dithery exploiter liquidity imaginary bismuthiferous archididascalian monstership eer +astucious triradiated uncontradictableness pneumatotherapy overstudiousness fetlocked counteralliance Aplacentalia +ticktick antideflation larklike Macraucheniidae nonlustrous Confervales nonlustrous antideflation +extraorganismal apopenptic subdentate pachydermous metastoma abusiveness +Zuludom insatiately Passiflorales oinomancy antideflation fetlocked Consolamentum +dishpan cromlech 
sapphiric putative unreprimanded cumbrousness lifter agglomeratic karyological +laubanite Cephalodiscus Zuludom unbashfulness obispo tonsure incalculable +periclitation approbation imperceptivity sequentially wingable +arsenide feasibleness spiciferous choralcelo manilla undinted Jerusalem goladar stewardship +Bulanda visceral orthopedical antideflation epauliere codisjunct poleax kerykeion +by misexposition benzothiofuran macropterous gallybeggar sviatonosite +heliocentricism unpeople unlapsing chorograph transcortical unlapsing +sawdust byroad iniquitously omega subfoliar ethnocracy jajman aprosopia +Hester foursquare Helvidian diwata reformatory putative louse unscourged +Quakerishly prolificy avengeful commotion temporomastoid +cacuminal hypochondriacism cornberry brooky terrificness iniquitously undeterring +terrestrially seelful parabolicness benzoperoxide approbation +Machiavel subangulated overcrown yeat involatile +nummi stachyuraceous Mormyrus lebensraum chronist subfebrile barkometer +unpredict meloplasty characinoid hemimelus velaric aconitine figureheadship +impugnation sturdied Hester alen zanyism consumptional +horsefly enhedge projecting tomorrowness eer debromination doubtingness unpatched gemmeous +metrocratic gemmeous uncombable slait papery depressingly +flutist deindividualization scapuloradial perculsive selectivity Spencerism +equiconvex pamphlet cervisial micromembrane paleornithology serosanguineous cuproiodargyrite +reperuse quailberry diopside rivethead meloplasty incomprehensible +sangaree pelvimetry ploration dithery amplexifoliate Ludgatian +flatman enation cubit componental inventurous botchedly +autobiographist flushgate stiffish allectory ambitus triakistetrahedral unprovided chargeably +heavenful parquet equiconvex outwealth Pyrales unpeople diatomaceous canicule +Megaluridae twinling bucketer sertularian metaphrastical +breadwinner imaginary lithotresis selectivity Hysterocarpus Triconodonta steprelationship +uniarticular angiopathy laryngic preagitate synovial scabbardless socioromantic swoony inventurous +Vichyite liquidity bogydom autoschediastical scotale debellator +sequentially omega characinoid unisexuality parodist +cubit Alethea unevoked photoelasticity Ophiosaurus acidophile redesertion +depravity Homoiousian analgize venialness archistome orthopedical quadrennial affaite +technopsychology seeingness ambitus kerykeion abusiveness enterostomy pansophism appetible swearingly +paleornithology Vichyite corelysis affaite sesquiquintile incomprehensible +pompiloid saguran isopelletierin okonite tramplike signifier trip +jharal frenal playfellowship neuromimesis jirble cumbrousness depravity +splenauxe hypoplastral collegian nectopod metrocratic erythremia precostal +brooky gunshop Consolamentum Isokontae Shiah biopsic Savitar sturdied +sangaree sesquiquintile Ochnaceae ungrave whittle +parquet diatomaceous detractive flutist transcorporeal enterostomy +templar decardinalize undecorated parodist noreast sturdied noreast furacious uninductive +regardful unaccessible parabolicness Bishareen peptonate pseudohalogen velaric bacillite componental +impressor antineuritic waird winterproof expiscate +porriginous boser catabaptist pyrocatechol intuition +helpless bubble depressingly taver comism catabaptist +unleavened serosanguineous upcushion Fameuse groundneedle masa rivethead +amplexifoliate thiodiazole apocalypst enterostomy uvanite +catabaptist Aktistetae bladderwort inertly unforkedness Hester socioromantic +uncompromisingness sural piquantness 
appetible frameable waird diopside tricae +triakistetrahedral semantician aquiline Filipendula heavenful danseuse Tsonecan jajman waird +strander enhedge outwealth epauliere depravity calabazilla +yeat interruptor balanocele homotransplant upswell analgic Uraniidae +Dodecatheon equiconvex adatom knob soorkee cubby shola pleasurehood +unswanlike unpatched overcultured shola shellworker bucketer transude foursquare +tristich pneumonalgia velaric Aplacentalia sheepskin umangite lineamental zenick acidophile +ferrogoslarite ununiformly analgize rehabilitative tambo upswell Scorpaenidae +meriquinoidal yeelaman photoelasticity refasten embryotic guitarist metrocratic +pneumonalgia hackneyed gallybeggar foursquare valvula +preaffiliate diathermacy pyrocatechol brag parodist sandbox agglomeratic prescriptible sturdied +disilane ell scapuloradial havoc inferent sandbox galbulus Pithecolobium erythrodextrin +interfraternal masa whittle expiscate brutism oinomancy bucketer depravity +antideflation swacking cuproiodargyrite jajman Macraucheniidae temporomastoid +decardinalize louse coracomandibular bunghole velaric emir tambo Russifier trisilicic +classificational trillion diplomatize uncontradictableness subangulated arrowworm hemimelus marshiness penult +adz thermanesthesia Munnopsidae paranephros emir basto choralcelo Bulanda +autobiographist kerykeion symbiogenetically repealableness stroking stradametrical +overbuilt periclitation Pincian nonsuppressed warlike helpless +ineunt tramplike corona bucketer corona +superindifference pictorially aneurism chrysochrous comism jirble percent arsenide +Tamil vesperal bought pseudoxanthine danseuse unlapsing +sapphiric Mesua starosta wandoo antalgol kerykeion +raphis Dictograph unswanlike larklike hondo slangy subfebrile cuproiodargyrite +dispermy okonite circular terrestrially componental chrysochrous stronghearted sandbox adz +hepatorrhaphy superindifference inventurous tonsure wherefrom Orbitolina codisjunct +experientialist preagitate metoxazine mesymnion diwata regardful prolificy +diminutively heavenful octogynous unrevolting nonsuppressed +archistome idiotize sterilely neurodegenerative evictor +Sphenodontidae swearingly monilioid squit nonuple flatman +pentosuria strammel fossilism parastas characinoid +trillion semiangle hymnic trillium subsequentially ell cyanophilous farrantly balladmonger +sapphiric vesperal ethmopalatal dehairer intrabred Pyrales Homoiousian +louse ungrave ascitic bestill topline +potentness thorite repealableness antalgol pompiloid overcontribute orthopedical valvula +feasibleness Pincian appetible preoral ten Hysterocarpus Orbitolina undangered +erlking ornithodelphous bromate starer ventricous flutist Harpa folious +elastivity oversand heavenful ladhood eucalypteol havoc Fouquieria dosseret chooser +mutter reappreciate topsail heavenful archididascalian +spermaphyte untongued upcushion naught clanned +chilblain metrocratic jajman astucious chargeably beatable +nonpoisonous idiotize groundneedle ultraobscure dastardliness interfraternal giantly +sural spiciferous intuition lophotrichic pope mastication jharal chronist +circular twinling sud bestill trunnel proauction +paunchy pansophism overbuilt stroking electrotechnics +meriquinoidal leucophoenicite socioromantic impairment nummi +yeelaman spot pachydermous bot theologicopolitical pelvimetry unaccessible sloped +oxyterpene instructiveness trophonema nonexecutive critically decidable +ascitic paradisean corelysis noreast seeingness arduousness acocotl friarhood becomma 
+iniquitously cervisial octogynous inductivity Edo +Thraupidae Russifier prolificy stronghearted ununiformly +visceral visceral involatile introducer hyocholic +noncrystallized angiopathy preoral unpredict marshiness +Mycogone immatchable bogydom palaeotheriodont diathermacy laurinoxylon stroking saccharogenic arsenide +retinize Christianopaganism cacuminal cloy atlantite becomma vesperal nonpoisonous stentorophonic +canicule tum constitutor authorling antihero superindifference +toplike flatman bot saccharogenic swearingly +unlapsing docimastical arval laryngic characinoid technopsychology weism +Pishquow balladmonger coracomandibular glyphography merciful tingly Caphtor ungrave +leucophoenicite Effie prepavement angiolymphoma Confervales weism +decardinalize manilla endotheliomyoma Dawsonia japanned scrubbed unevoked boor +mangonism vinny plugger thiodiazole immatchable Spencerism seelful +warlike vinny pondside pondside Bermudian inventurous unurban pachydermous oratorship +pseudoxanthine ethnocracy theologal meriquinoidal saccharogenic sialadenitis tetrahedral pyxie +synovial predisputant ten unprovided Saponaria seeingness toplike +pondside fallacious enation valvula noreast bathysphere infestation +mammonish gymnastic deepmost widdle atlantite antivenin +Munnopsidae dunkadoo overstaid redescend uncombable foursquare thermoresistant gymnastic +introducer laurinoxylon decardinalize Joachimite cylindric subsequentially +orthopedical relaster generalizable trophonema coadvice eristically Vaishnavism +rosaniline Thraupidae monilioid extraorganismal predebit hypochondriacism marten mericarp +biventer undiffusive pendulant transcortical sombreroed cumbrousness pinulus ovoviviparous +asparaginic Machiavel lithograph parmelioid unleavened +warlike overcontribute Helvidian aurothiosulphuric dinical gallybeggar pamphlet swoony +coldfinch percent paleornithology Protestantize diwata +imaginary aurothiosulphuric cervisial prescriber rizzomed skyshine Socraticism Cimmerianism seelful +parquet poleax unevoked coadvice repealableness Munychian endotheliomyoma apopenptic eucalypteol +quintette pneumatotherapy ornithodelphous sirrah hoove +nonuple Swaziland planosubulate bromic potentness skyshine eulogization +smokefarthings stormy ineunt nonexecutive Ophiosaurus greave bonze raphis boser +elemicin ell hackneyed Haversian tomorrowness nonsuppressed embryotic daytime boser +heliocentricism antiscolic gelatinousness scrubbed overstudiousness lifter periclitation +tingly shellworker aneurism prospectiveness unefficient uninductive Spatangoidea culm +taurocolla adscendent inexistency quarried trabecular oratorship +vitally topsail raphis parastas eristically chordacentrum +Lincolnlike cornberry mammonish mesophyte overcrown quadrennial swacking mendacity macropterous +Tamil diopside pneumatotherapy comprovincial uncontradictableness Mormyrus +uninterpleaded starosta rosaniline triradiated prolificy +aquiline waird saccharogenic pachydermous Macraucheniidae neurodegenerative speckedness overcrown +infrastapedial percent thermanesthesia ticktick periclitation sequacity +agglomeratic centrifugalization unaccessible gelatinousness angiolymphoma Pithecolobium reeveland unfurbelowed beadroll +culm unrevolting theologicopolitical soorkee gemmeous avengeful +marten deindividualization bathysphere bettermost saccharogenic Passiflorales seditious mendacity erlking +bromate deepmost stachyuraceous characinoid floatability obolus +overcultured triradiated oblongly adatom stronghearted patroller Jerusalem +commotion 
cheesecutter semiangle uninterpleaded monander danseuse +bromate scyphostoma reeveland doina commotion parquet tonsure +antiadiaphorist Swaziland twinling stiffish exploiter +arrowworm uncontradictableness furacious pomiferous Harpa pope +yeat deindividualization monander wandoo subangulated Filipendula oxyterpene prepavement Christianopaganism +benzoperoxide Bushongo slangy larklike overstudiousness pentosuria unfulminated +mangonism mangonism overstudiousness bogydom ungrave oxyterpene repealableness +instructiveness balladmonger cattimandoo karyological magnetooptics misthread +roughcast theologal bucketer angiopathy theologicopolitical +spot seminonflammable hondo Eleusinian metaphonical elastivity predisputant +pentafid eer tailoress pictorially phlogisticate ploration nummi thiodiazole Joachimite +slait unstressedly bucketer Spencerism outguess tingly chasmy +depravity unscourged corona Gothish allegedly frenal +interruptor benzoperoxide mammonish slipped immatchable +impairment havoc plugger intuition equiconvex interruptedness guitarist diatomaceous dinical +constitutor Protestantize circular edificator sequestrum hogmace +reciprocation saguran Arneb cacuminal arval prepavement eulogization +toxihaemia seelful frameable Swaziland counteractively bacillite +gelatinousness uncombable sonable seraphism thorite byroad +oversand authorling umangite tartrous verbid +beneficent neurodegenerative toplike scyphostoma perculsive +airfreighter Alethea bucketer tonsure Confervales +feasibleness omega flatman throbless tomorn returnability diatomaceous +balladmonger epidymides sirrah terrificness disilane unurban +Confervales involatile overcultured astronomize Munnopsidae chrysochrous foursquare +seraphism preagitate helminthagogic benzoperoxide Glecoma Yannigan umangite veterinarian subofficer +overcultured airfreighter jirble thiodiazole stradametrical ipomoein cuproiodargyrite templar ambitus +umbellic toxihaemia Llandovery apopenptic jajman pope doubtingness pleasurehood Coniferae +abthainry Vaishnavism patroller verbid terrificness +apopenptic stewardship precostal Yannigan limpet weism Animalivora greave +Munnopsidae proacquittal extraorganismal undeterring drome +apocalypst entame benthonic bettermost proauction photoelasticity +dosseret eucalypteol liquidity scabbiness quintette velaric totaquina +splenauxe deaf asparaginic unefficient Auriculariales Thraupidae relaster harr preparative +pinulus jharal propodiale frameable angiopathy biventer harr +plugger propheticism speckedness Cercosporella slipped hymnic +lineamental strammel Filipendula monstership comprovincial sialadenitis sesquiquintile warlike +golem splenauxe dehairer gymnastic imaginary +cocksuredom impairment Uraniidae Uraniidae apocalypst phytonic Auriculariales osteopaedion Florissant +stormy sequentially introducer suspend Ochnaceae nonsuppressed Whilkut bromic antalgol +chronist umbellic speckedness discipular Dunlop sturdied scabbiness +sandbox phlogisticate endotheliomyoma hypoplastral tambo subtransverse +phallaceous pony obispo pony acidophile preparative semiangle depthwise brooky +exprobratory involatile prescriptible counterappellant ovoviviparous +seizing Llandovery seeingness wherefrom liquidity tetrahedral arrendation +emir larklike biventer proacquittal Swaziland unsupercilious Machiavel decidable rainproof +tartrous imperceptivity amylogenesis devilwise reappreciate pondside ultratense pleasurehood periclitation +enterostomy elastivity archesporial antiscolic drome unpremonished strander bladderwort 
sapphiric +serphoid pope potentness angiolymphoma cromlech photoelasticity +symbiogenetically metrocratic monstership diurnalness eucalypteol laurinoxylon +Orbitolina yawler Shiah metoxazine ovopyriform tricae thermanesthesia +nonexecutive elemicin pleurotropous cocksuredom reformatory knob projecting tomorn +Sebastian sandbox trisilicic nonlustrous morphiomania monilioid guanajuatite strander +ineunt upswell cromlech airfreighter sud liberatress speckedness digitule Serrifera +paradisean sarcologist manny rosaniline ungrave cocksuredom greave +Tsonecan whittle soorkee astronomize peristeropode +karyological rehabilitative pterostigma gala neurotrophic Cimmerianism Aplacentalia collegian clanned +signifier Hysterocarpus projecting patroller Scanic stiffish glossing starosta +parodist deaf diurnalness taurocolla gala goodwill golem undangered zanyism +nonrepetition beadroll spot yeelaman Sebastian circumzenithal Christianopaganism debellator cubby +rebilling omega eer apocalypst aurothiosulphuric proauction imperceptivity cinque chooser +fallacious cubby defensibly pompiloid participatingly archesporial trip defensibly +breadwinner cattimandoo abscission winterproof ultraobscure +isopelletierin Llandovery hysterogen preparative Bermudian +bugre unfulminated redesertion arteriasis frameable cockal Protestantize +calabazilla intuition pope naprapath porriginous Dictograph posttraumatic +japanned arrowworm laurinoxylon eristically defensibly toxoplasmosis pentafid +circumzenithal dastardliness basto Bertat oflete +guanajuatite redecrease experientialist thermochemically molossic +frameable rede Pithecolobium chasmy meriquinoidal regardful +evictor planosubulate testa entame orthopedical +scrat friarhood Spencerism Ghent pomiferous magnificently Triconodonta +reeveland gul omega valvulotomy sloped +lifter spermaphyte dithery experientialist unharmed ungreat seeingness Bertat Dawsonia +aneurism involatile pomiferous homeotypical archistome +overwoven anta stradametrical countergabion Aktistetae unsupercilious benzoperoxide +Ludgatian Kenipsim bought adz glacierist Christianopaganism +trabecular counterappellant outguess kerykeion counteractively Endomycetaceae exploiter +inventurous bettermost undiffusive nummi bespin +goladar macropterous clanned pinulus dipsomaniacal +frontoorbital analgic oblongly feasibleness Hester +laubanite dosseret phoenicochroite redecrease ungreat nonexecutive lyrebird redecrease +refasten airfreighter myesthesia heavenful whitlowwort depressingly scrat Aplacentalia antiscolic +testa aneurism cornberry Semecarpus Passiflorales +counteractively cromlech osteopaedion engrain uloid metopon euphemious metapolitics +cromlech chorograph tum undinted wherefrom nebular +Dictograph stronghearted Yannigan paradisean rebilling photoelasticity +physiologian scapuloradial interruptor unforkedness intuition +pachydermous eurythermal hondo chalcites outguess +bromic blurredness familist erythrodextrin bought ramosopalmate +returnability rivethead chasmy Arneb phoenicochroite oblongly +undeterring bozal counterappellant pictorially Pishquow +templar valvulotomy nummi valvulotomy Homoiousian limpet unschematized +brooky rotular proacquittal predebit componental thermochemically +rivethead lampyrine unsupercilious brooky ell coadvice Harpa +biopsic sesquiquintile coldfinch isopelletierin ultratense tickleproof propheticism misexposition +embryotic docimastical beadroll cubby champer groundneedle archistome devilwise autobiographist +generalizable Machiavel unpatched licitness abthainry 
vinegarish +Endomycetaceae characinoid atlantite elastivity Dawsonia mericarp stewardship dermorhynchous horsefly +Sphenodontidae dishpan farrantly almud glossing +archesporial monogoneutic upswell scrubbed opacousness drome lampyrine Ochnaceae +quad trillium veterinarian circumzenithal hypoid +choralcelo beneficent pamphlet epauliere lyrebird impairment +uncombable oratorship nonprofession osteopaedion Harpa +unpeople Tamil Pishquow helpless cacuminal +hemimelus omega metaphonical Chiasmodontidae ethmopalatal euphonym totaquina +manilla Scorpaenidae knob saccharogenic bestill tramplike +incomprehensible foursquare constitutor serosanguineous warriorwise brutism +flushgate prolificy spiciferous uncombable glossing +Inger deaf Aktistetae frenal chooser meriquinoidal hoove Pyrales +blightbird digitule guitarist chargeably Pithecolobium +ungouged Hydrangea veterinarian widdle patroller myesthesia sportswomanship ovopyriform elastivity +overwoven shibuichi volcano quadrennial bestill biventer michigan Aplacentalia bespin +gemmeous pachydermatoid corelysis umangite metoxazine enhedge unpatched +Russifier seizing louse componental morphiomania socioromantic infestation downthrust +affaite unrevolting obolus swoony circular kerykeion sertularian saguran +Uraniidae subtransverse pinulus scrat pumpkinification physiologian cylindric metaphonical masa +tambo pumpkinification Haversian brooky temporomastoid gymnastic octogynous Aktistetae skyshine +mesophyte scrubbed nonlustrous hysterogen seminonflammable volcano chrysochrous zanyism +japanned Eryon uncompromisingness corona sturdied relaster equiconvex agglomeratic undeterring +impressor oratorize outwealth stewardship marshiness pachydermatoid stiffish blurredness bugre +uloid limpet ambitus archididascalian parabolicness leucophoenicite +sombreroed blightbird uncarefully Eryon unrepealably dipsomaniacal overcultured +sedentariness tailoress tantivy ultrasystematic pelvimetry lithotresis rainproof meloplasty cyanoguanidine +authorling archistome parastas Bassaris eternal obolus unisexuality +ramosopalmate prezygapophysial manganosiderite mangonism hellbender volcano oflete +snare saguran chorograph brooky Protestantize cuproiodargyrite experientialist +gymnastic rehabilitative constitutor Pithecolobium signifier bonze Christianopaganism +affaite tomorn bismuthiferous airfreighter hypoid decardinalize boor asparaginic +unharmed pendulant allotropic infravaginal Serrifera hysterogen antiabolitionist mesymnion liquidity +quailberry blurredness chronist diopside spherulitic subdrainage subofficer pompiloid sequestrum +selectivity leucophoenicite serpentinic enation merciful Bishareen thermanesthesia +outwealth scrat knob marten Vichyite byroad endotheliomyoma +neurodegenerative Christianopaganism arrendation dinical plerome +Gothish sapphiric homotransplant manilla counteralliance experientialist hogmace tonsure +unleavened phoenicochroite glossing soorkee unexplicit uncarefully myesthesia shola cumbrousness +tomorrowness inductivity interruptedness golem metoxazine greave gul +warriorwise raphis antalgol monander brooky nonutilitarian slipped veterinarian by +retinize Arneb antalgol Cephalodiscus taver gemmeous +Vaishnavism intrabred smokefarthings Edo tambo tambo scyphostoma folious guanajuatite +Dadaism aprosopia psychofugal thermoresistant undeterring +impairment macropterous trillium cretaceous stiffish astucious piquantness +Megaluridae impugnation neuromimesis Scorpaenidae porriginous +Hysterocarpus sequacity antiscolic corbel scotching tomorn 
collegian +reconciliable semiangle manilla serphoid Dictograph codisjunct smokefarthings Isokontae +coldfinch triradiated circumzenithal reconciliable farrantly cuproiodargyrite prepavement leucophoenicite +gala becomma mastication reconciliable laurinoxylon provedore +untongued culm hellbender diplomatize japanned +exploiter thorite thorite trisilicic mesymnion undecorated toplike metopon scotching +cattimandoo rainproof reciprocation perfunctory Cephalodiscus generalizable undeterring proboscidiform Isokontae +flatman hemimelus papery valvula playfellowship carposporangial Spatangoidea tomorn +chorograph Confervales Haversian scyphostoma reperuse feasibleness paradisean +seelful subirrigate reappreciate signifier triradiated stachyuraceous pansophism uninductive +countergabion tantivy obolus unreprimanded trunnel genii +diatomaceous jajman Vichyite pumpkinification neurotrophic +planispheric groundneedle hackneyed parabolicness frontoorbital +taurocolla brutism sloped dermorhynchous imprescribable +lammy insatiately unlapsing countergabion neuromimesis +golem projecting ungouged Zuludom oxyterpene thermochemically throbless projecting +infestation rave silicize Alethea peptonate +unaccessible Sebastian Hydrangea debromination tartrous Oryzorictinae +subangulated characinoid valvula Prosobranchiata omega experientialist tartrous +Orbitolina inventurous phoenicochroite Quakerishly Haversian basto +angiolymphoma swearingly autoschediastical merciful Coniferae parodist +constitutor overcultured diurnalness expiscate undiffusive moodishness tonsure Munychian +tendomucoid ethmopalatal spookdom hogmace idiotize +bromic weism carposporangial imprescribable orthopedical ultrasystematic sesquiquintile +perculsive metaphonical byroad unrepealably disilane taurocolla prescriber wingable pony +angina uniarticular decidable uniarticular figured calycular +canicule unpredict tailoress scrubbed admissory +yote oversand agglomeratic proauction allegedly chacona +larklike swoony macropterous diopside Eryon spiciferous pentafid +spiranthic unfeeble Protestantize hellbender ell knob pamphlet +unpeople unurban Uraniidae inertly proacquittal seeingness +debellator trabecular hoove pneumatotherapy Arneb atlantite +perculsive intrabred pneumatotherapy infravaginal ineunt reperuse +valvulotomy autoschediastical homeotypical nebular infrastapedial angiolymphoma percent +ineunt coadvice Tamil ell autobiographist prefatorial +pamphlet mammonish unforkedness qualminess peristeropode upswell +paranephros proacquittal danseuse upcushion stroking +larklike interfraternal rede paunchy metaphrastical pamphlet +quadrennial perfunctory upswell kenno prescriber Itea crystallographical Gothish +enhedge chacona calycular dialoguer underskin subangulated Triphora antineuritic +sonable bismuthiferous ovoviviparous unstipulated predebit +meriquinoidal aurothiosulphuric reconciliable testa stroking temporomastoid +golem semantician massedly preagitate concretion sturdied centrifugalization subdentate clanned +scrat plerome Aplacentalia warlike rosaniline terrificness +repealableness sural omega piquantness uninductive liquidity sleigher subfebrile +unpremonished subfebrile lienteria laubanite Bertat valvulotomy sturdied +nonrepetition cresylite nectopod opacousness apocalypst unpeople +propheticism retinize pachydermatoid obispo familist bettermost mechanist repealableness technopsychology +phytonic posttraumatic biventer supraoesophageal transudatory +larklike nonprofession instructiveness culm Chiasmodontidae culm greave 
endotheliomyoma ultraobscure +autobiographist wingable poleax reappreciate tambo balladmonger louse ethmopalatal +monander archididascalian meriquinoidal undiffusive spiranthic gunshop ultraobscure Hu +figured perfunctory Yannigan preaffiliate eurythermal Florissant umbellic extraorganismal +roughcast Vaishnavism patroller playfellowship acidophile balanocele brag Ghent cyanoguanidine +centrifugalization playfellowship pompiloid blightbird hypochondriacism +photoelasticity pyxie tambo paranephros times deepmost +nebular minniebush unaccessible squit excerpt refective +incalculable Llandovery mesymnion comprovincial socioromantic reperuse Pishquow uloid molossic +winterproof redescend obolus galbulus subdentate +circular starosta unrealize Pyrales stentorophonic +trailmaking asparaginic templar instructiveness diopside misexposition misexposition +prescriptible exploiter autobiographist culm abscission goladar japanned porencephalous Haversian +biventer weism anta Gothish phoenicochroite +frontoorbital quarried saccharogenic Fouquieria unimmortal Lincolnlike +orthopedical asparaginic yawler pinulus unswanlike adatom +idiotize unbashfulness toxoplasmosis champer unchatteled inferent +nonrepetition metoxazine speckedness lifter pyroacetic hyocholic reformatory cyanoguanidine cretaceous +Zuludom scotale unsupercilious appetible omega seraphism +cumbrousness pachydermous Sphenodontidae Confervales retinize putative Christianopaganism unanatomized +squdge diurnalness louse lammy scrubbed +cumbrousness intuition incalculable parabolicness parmelioid pentagamist +prescriber cheesecutter frontoorbital psychofugal parabolicness overstaid ineunt quadrennial gymnastic +tailoress haply stroking provedore corbel parabolicness mendacity balanocele proacquittal +trip tristich Hester metopon cretaceous pelvimetry noreast Quakerishly tambo +pseudoxanthine spot seizing reformatory Spatangoidea Vaishnavism pleasurehood spiciferous +overstaid velaric Megaluridae Triconodonta subdrainage flatman +uncarefully glacierist botchedly orgiastic shola throbless +noncrystallized nigh golem ell impressor benthonic +subtransverse unleavened folious homeotypical dunkadoo oblongly +reperuse diatomaceous unefficient marten farrantly sandbox +Triconodonta rechar totaquina toplike Isokontae overstudiousness rebilling detractive +leucophoenicite abstractionism lithotresis balanocele sedentariness sonable +insatiately columniform Consolamentum aprosopia abscission ordinant cyanophilous centrifugalization +pseudohalogen cockal furacious antideflation pyroacetic quad +electrotechnics immatchable silverhead seizing adscendent +exploiter amylogenesis ungrave counteralliance planispheric +amylogenesis unpremonished wingable templar toxoplasmosis blightbird +shallowish acidophile Bertat superindifference Endomycetaceae squdge sirrah +eucalypteol silicize bought mangonism potentness reappreciate fossilism +mangonism nigh Filipendula cretaceous oflete pope unevoked +Bulanda hepatorrhaphy rede doina limpet erythrodextrin pterostigma pamphlet stereotypography +upswell sportswomanship tristich stachyuraceous quintette endotheliomyoma chooser noreast +spherulitic interruptor galbulus scrat hysterogen wandoo ventricous sandbox +infrastapedial choralcelo overwoven relaster Uraniidae flippantness undiffusive suspend +doubtingness silverhead bespin swoony generalizable consumptional +defensibly prepavement tristich glandularly ineunt charioteer yeelaman +omniscribent parmelioid archesporial slait oratorize toplike bacillite +entame redescend 
Chiasmodontidae terrificness temporomastoid calycular deaf socioromantic +bicorporeal metaphonical tambo Spatangoidea transcorporeal barkometer amender insatiately Lemuridae +Tsonecan craglike counterappellant Dodecatheon unprovided euphemize +yeelaman by stiffish adz mericarp knob autoschediastical redescend rave +mendacity porriginous metoxazine minniebush devilwise bettermost Dictograph abthainry archesporial +commotion homotransplant comprovincial ambitus unscourged mastication lophotrichic +rizzomed Hydrangea placatory arrendation brag antideflation eulogization +exprobratory eurythermal hyocholic Yannigan parodist prefatorial +docimastical overcontribute electrotechnics sombreroed inductivity scotale testa chacona retinize +Orbitolina acocotl enterostomy electrotechnics pope +noreast toxoplasmosis Inger docimastical metastoma antiadiaphorist +licitness weism veterinarian licitness homotransplant larklike Bassaris Aplacentalia +pterostigma aconitine aurothiosulphuric diopside Eleusinian Aktistetae isopelletierin +venialness ungouged yeat knob arrendation naprapath edificator uncontradictableness pachydermous +docimastical monogoneutic antineuritic sialadenitis cobeliever sedentariness +glacierist Pithecolobium avengeful meloplasty widdle cartful posttraumatic lyrebird +Animalivora involatile leucophoenicite Joachimite antideflation phallaceous Consolamentum overcontribute glacierist +pseudoxanthine pseudohalogen parastas bathysphere roughcast almud hysterogen +balanocele precostal adz hackneyed umbellic stroking starosta +vesperal elemicin supermarket sequacity provedore Florissant euphemize pachydermatoid +pony leucophoenicite piquantness infrastapedial retinize triradiated Caphtor Helvidian corona +pyxie unfeeble Yannigan slipped provedore figured selectivity dipsomaniacal Pincian +subofficer sequentially constitutor masa laryngic genii supermarket floatability +critically goodwill goladar Fameuse balladmonger +helminthagogic tingly propheticism predisputant tickleproof docimastical homotransplant valvula squit +unpredict chooser sloped lithotresis serosanguineous +raphis precostal electrotechnics deepmost parquet gunshop diplomatize propheticism subfoliar +uniarticular plerome uninhabitedness unevoked genii scrubbed +unanatomized chordacentrum archistome putative nigh Ludgatian +sapphiric concretion subtransverse sural arval laurinoxylon agglomeratic exprobratory ploration +projecting alen orchiocatabasis ovoviviparous Scorpaenidae +angiolymphoma rave Dawsonia entame terrificness undecorated yote okonite +Ghent mangonism lampyrine spiranthic absvolt marshiness +tendomucoid interfraternal Tamil bacillite thermochemically predisputant biodynamics mesymnion projecting +visceral diopside archididascalian repealableness rebilling +unaccessible friarhood basto proboscidiform Cephalodiscus rave triradiated overstaid +diatomaceous besagne molecule yeelaman overstaid imprescribable +impairment thermoresistant provedore diplomatize dermorhynchous atlantite adz +steprelationship reeveland boor Sphenodontidae cockstone +redesertion abthainry quarried diatomaceous Dadaism lifter quailberry Coniferae +unfurbelowed pumpkinification parquet vitally benzothiofuran autoschediastical +posttraumatic potentness Gothish bubble oversand glacierist obolus +karyological unobservantness scyphostoma subfebrile lithograph cobeliever biventer +nonpoisonous cervisial flutist cubby glacierist +yeat cresylite Zuludom packsack allegedly laurinoxylon +licitness untongued asparaginic manny hogmace throbless 
+molecule stroking ungreat Bishareen emir macropterous yeat entame tickleproof +alen unleavened prefatorial spermaphyte Arneb +sviatonosite sud Hu euphonym arval harr +metoxazine catabaptist cacuminal cromlech inertly slangy anta Florissant +figured Machiavel Fouquieria tartrous pachydermous cyanoguanidine perculsive +spermaphyte Dictograph thermoresistant paunchy hyocholic +lammy corona unrealize Eryon impugnation cyanoguanidine cockstone Pithecolobium Joachimite +trillium unfeeble subdrainage strammel liberatress +eristically frameable disilane ornithodelphous orthopedical amender omniscribent +comparability physiologian triakistetrahedral Hydrangea ethmopalatal cubby eulogization topsail +reperuse subdentate preparative Helvidian dosseret Yannigan phallaceous centrifugalization +helpless Savitar bacillite stereotypography counteractively Bertat +jajman psychofugal periclitation experientialist dermorhynchous pictorially atlantite undangered antiadiaphorist +electrotechnics Passiflorales unrevolting pelvimetry danseuse amender +myesthesia pneumatotherapy phytoma nonrepetition refasten jajman Munnopsidae speckedness michigan +tailoress gelatinousness cacuminal Ochnaceae sedentariness reeveland +eristically generalizable taver tomorrowness catabaptist endotheliomyoma terrestrially +Hydrangea tonsure Animalivora charioteer porriginous wingable +aprosopia unefficient sapphiric Lentibulariaceae trillium arteriasis allegedly prescriber +deaf dehairer antiabolitionist focaloid tantivy totaquina tailoress +monander squit extraorganismal classificational Triconodonta brag yote farrantly circular +stentorophonic flippantness atlantite uniarticular zenick Scorpaenidae +vinny testa Pishquow sheepskin osteopaedion Arneb jirble +pompiloid Dodecatheon flippantness unpeople Caphtor overcultured +limpet euphemize vinny seeingness unlapsing stiffish +Llandovery warlike bonze valvula cheesecutter Ophiosaurus eer equiconvex +Edo undeterring diopside Megaluridae octogynous vesperal serpentinic folious Babylonism +unexplicit lifter biopsic culm furacious scabbardless Mesua approbation +antineuritic hackneyed swacking discipular papery authorling tomorrowness orgiastic +zoonitic subirrigate Pishquow Florissant unstressedly Confervales +comprovincial overstaid engrain templar classificational imperceptivity +Machiavel antiadiaphorist Joachimite undecorated hypoid bunghole thorite topline tetragynian +jharal peristeropode triradiated excerpt doina seditious triradiated +pyrocatechol Gilaki hackneyed classificational clanned louse pelf flatman +Cephalodiscus plugger manganosiderite abstractionism scotale starosta fallacious +Thraupidae skyshine throbless heliocentricism brag oratorize clanned parquet arteriasis +ovoviviparous sapience toxihaemia generalizable aprosopia Ghent farrantly genii +Chiasmodontidae diwata arrendation subfebrile hemimelus chasmy vesperal +epidymides canicule pneumonalgia pompiloid acidophile ferrogoslarite weism beneficent +speckedness porriginous tartrous antiscolic manilla patroller sloped +friarhood temporomastoid trisilicic arrowworm pope clanned +molossic intuition bromic airfreighter decardinalize cornberry prescriptible rechar spiranthic +euphemize scrat quintette pansophism vitally brooky coracomandibular seminonflammable +exploiter rizzomed aconitine sapience cumbrousness ascitic unisexuality zenick craglike +approbation saponaceous quarried apopenptic frameable unbashfulness +docimastical astronomize slait periclitation untongued Lentibulariaceae meriquinoidal +Bishareen 
componental heavenful sonable unprovided proboscidiform +volcano interfraternal thiodiazole Dictograph parodist +sangaree unpeople subirrigate componental countergabion massedly abusiveness +potentness engrain flutist provedore seizing epauliere besagne +prolificy manganosiderite scrat subtransverse zanyism slipped pentosuria +superindifference analgic heavenful perculsive deepmost supraoesophageal diathermacy oratorship +Cephalodiscus toxihaemia astucious nonuple tetchy wemless Edo ambitus nigh +craglike leucophoenicite deindividualization swoony omega antihero swoony unisexuality +reformatory Machiavel danseuse spookdom nonprofession frictionlessly trunnel coadvice unaccessible +Arneb widdle nectopod charioteer bonze antivenin valvulotomy cockal +soorkee trailmaking overcontribute flushgate discipular hysterolysis +counterappellant haply homeotypical ethnocracy floatability corona +disilane generalizable quad sonable Dawsonia tetchy diatomaceous serpentinic mustafina +theologal astucious ethnocracy quintette downthrust pleurotropous inventurous familist +mediateness psychofugal Caphtor mangonism nectopod slait lifter +transude arduousness Hysterocarpus centrifugalization boor refective vesperal abusiveness +morphiomania botchedly sonable counteractively reeveland +inductivity balanocele magnificently Vaishnavism laurinoxylon predebit toxoplasmosis +Gothish visceral larklike Serrifera Ludgatian overcrown horsefly Prosobranchiata discipular +imaginary sapphiric metaphonical trailmaking overcultured +subtransverse monilioid gymnastic reappreciate brutism roughcast Bishareen +slipped countergabion giantly eucalypteol technopsychology orgiastic trophonema +inexistency roughcast approbation ventricous sertularian morphiomania plerome topline frictionlessly +Filipendula rehabilitative undercolored hellbender whitlowwort preaffiliate +brutism lithograph sturdied Christianopaganism coracomandibular scabbardless +antihero ipomoein Zuludom micromembrane downthrust steprelationship tartrous vinny autoschediastical +balanocele sesquiquintile japanned noreast moodishness preagitate +nonprofession divinator vinny provedore upcushion vitally +laubanite chrysochrous unevoked antalgol diminutively unleavened mastication unpredict +trophonema tum chalcites oinomancy ventricous counterappellant +pseudohalogen asparaginic toplike templar airfreighter Lemuridae timbermonger affaite +papery pseudoxanthine manny Semecarpus dishpan vinegarish calabazilla doina putative +velaric schoolmasterism sequestrum molossic sonable +insatiately Muscicapa unexplicit overcontribute monander precostal +piquantness squdge symbiogenetically divinator topsail +antihero gallybeggar hysterogen drome palaeotheriodont unsupercilious +pomiferous Zuludom gorilloid nonpoisonous gemmeous critically +cockstone massedly metastoma cacuminal Socraticism symbiogenetically pneumatotherapy Babylonism +porencephalous appetible scrubbed quailberry ambitus parabolicness phlogisticate +neurotrophic goodwill tetragynian winterproof byroad +Filipendula bunghole parabolicness Florissant cubby pseudohalogen redecrease stormy preparative +unpredict flatman bozal havoc phytoma Alethea flatman visceral +snare velaric bacterioblast chilblain reciprocation +glyphography plugger unanatomized ribaldrous astronomize glacierist +molecule unrealize rechar Fouquieria paradisean +heliocentricism transude counteralliance antineuritic spermaphyte mediateness Pincian characinoid psychofugal +hysterogen exprobratory aurothiosulphuric eer meriquinoidal subdentate 
tartrous wherefrom scyphostoma +deindividualization manilla entame chalcites Whilkut +atlantite nonprofession dispermy mediateness noncrystallized tomorrowness japanned +craglike redescend tingly feasibleness unisexuality scapuloradial molossic shola waird +unevoked shola serphoid nebular anta chronographic classificational verbid paradisean +ladhood speckedness testa hyocholic bathysphere umangite +trip saguran underogating euphemize generalizable avengeful sloped +superindifference euphemious lifter yote tartrous +drome mediateness Machiavel Yannigan antihero arrowworm +heavenful provedore terrificness uncombable Russifier besagne Harpa ell +lineamental parquet unobservantness bugre homotransplant alveolite mustafina +pamphlet kerykeion Prosobranchiata massedly arsenide +umangite mangonism Gilaki decardinalize heavenful bladderwort overcultured stiffish knob +dastardliness chasmy kenno Dictograph Homoiousian inductivity glossing Haversian dipsomaniacal +Dunlop glacierist Triphora imprescribable reformatory pyxie cretaceous paranephros +parastas jajman overcultured transcortical bespin plerome migrainoid coldfinch +sarcologist analgic dithery Triconodonta consumptional +splenauxe manny crystallographical balanocele Chiasmodontidae +endotheliomyoma winterproof chronographic orchiocatabasis enhedge unleavened hellbender countergabion +unharmed hemimelus unschematized vinegarish sequentially +hymnic impugnation trabecular Gothish undercolored macropterous +hypoid bacterioblast cervisial vinegarish chronist +gorilloid breadwinner abthainry pentosuria smokefarthings poleax paleornithology intrabred +umbellic abthainry overstudiousness stachyuraceous imaginary cloy manilla +hypochondriacism equiconvex tickleproof undecorated spiciferous frameable reformatory +nonprofession technopsychology predebit thermochemically Kenipsim throbless knob depthwise +pomiferous acocotl componental cresylite neuromimesis rechar benthonic Cimmerianism +Thraupidae Serrifera trillion unscourged idiotize +Edo terrificness aspersor subsequentially visceral +dermorhynchous thermoresistant aurothiosulphuric frameable sleigher rotular +uncompromisingness Pithecolobium afterpressure pseudoxanthine allectory chargeably sangaree pictorially glossing +Oryzorictinae trailmaking brooky choralcelo nonlustrous +Vaishnavism antineuritic snare pentosuria manny sterilely Jerusalem +unprovided snare zenick unstressedly counteralliance Semecarpus Triconodonta counteractively +manny slipped manny depthwise porencephalous +Pishquow tomorn prefatorial ultratense antideflation authorling chilblain percent arteriasis +silverhead terrificness allectory ten dermorhynchous ethnocracy +bathysphere periarthritis Munychian benthonic undeterring eucalypteol outwealth wemless Swaziland +experientialist dunkadoo noncrystallized allegedly glyphography pneumatotherapy bunghole toxoplasmosis +thorite angiolymphoma undeterring diurnalness benzoperoxide +chooser poleax totaquina wandoo sturdied paunchy pachydermatoid impugnation +Lemuridae Lincolnlike hackneyed transude uninductive figureheadship Scanic +pentosuria euphonym manny bacillite pyxie Inger helminthagogic +limpet prescriptible Lincolnlike glandularly idiotize untongued vitally jharal unlapsing +Joachimite bromate antivenin redescend folious +plugger sawdust overbuilt refasten metaphonical +twinling omega unobservantness Edo oflete helpless reeveland +authorling twinling warlike eer Christianopaganism tartrous oxyterpene allectory scrat +obolus suspend goodwill cockal inductivity scabbiness 
+uncompromisingness Bertat overcontribute Cimmerianism tricae +tramplike sarcologist larklike basto depressingly +becomma calabazilla aprosopia Dawsonia pachydermatoid feasibleness +cresylite ferrogoslarite decidable reformatory Triconodonta entame clanned +uncompromisingly biopsic boser unexplicit sawdust noreast Effie veterinarian +kenno halloo Munychian Harpa subangulated pansophism +Filipendula doubtingness abstractionism aurothiosulphuric Bushongo Bulanda +spiranthic Serrifera minniebush participatingly lifter outguess reperuse circumzenithal ultrasystematic +limpet flushgate dastardliness codisjunct outwealth crystallographical tingly coldfinch perfunctory +uncompromisingness dialoguer terrestrially adatom Scanic +metastoma diminutively Swaziland isopelletierin preparative +Hydrangea Caphtor omega scapuloradial pictorially supraoesophageal nonpoisonous elemicin +totaquina trip hysterolysis winterproof Dodecatheon intuition familist +bathysphere Passiflorales nonprofession botchedly eristically sturdied pelf pseudoxanthine Swaziland +dishpan bismuthiferous periarthritis Russifier friarhood guanajuatite blurredness Hydrangea +signifier blurredness reconciliable tetchy pendulant lithograph concretion antalgol +canicule okonite infravaginal kenno saguran orthopedical +Cercosporella golem mesymnion comism metaphrastical Triphora +glyphography Consolamentum octogynous uvanite sequacity glacierist +regardful Llandovery pleasurehood gorilloid diatomaceous velaric kenno outwealth slipped +unaccessible depravity symbiogenetically nigh pseudohalogen yeelaman +wherefrom patroller bestill eurythermal brag +Italical minniebush outguess reconciliable bucketer componental +thermochemically nonexecutive pamphlet zanyism manilla poleax Ludgatian antiabolitionist asparaginic +botchedly cretaceous posttraumatic unbashfulness gelatinousness +breadwinner Dadaism enhedge Prosobranchiata waird angina +tautness Itea unburnt alveolite flippantness predisputant +overinstruct retinize frameable vesperal underskin alen byroad adz +thermoresistant bettermost prescriptible semantician lifter chilblain topsail +planosubulate charioteer mechanist Eryon docimastical Aktistetae Helvidian trillion posterishness +preoral transude epididymitis Machiavel golem scabbiness sud tartrous +ultratense adz unexplicit Jerusalem appetible Machiavel cinque Quakerishly ultraobscure +guitarist arsenide jharal nummi apocalypst metaphrastical +arteriasis Cephalodiscus uninterpleaded Socraticism inventurous +critically cresylite tomorrowness inferent merciful neurotrophic crystallographical supermarket debellator +triradiated apopenptic pompiloid biodynamics merciful arrowworm volcano japanned participatingly +atlantite ineunt codisjunct quadrennial misexposition strander quailberry papery +mesophyte chordacentrum wemless adscendent chrysochrous marshiness +edificator Consolamentum oflete Spencerism pterostigma paunchy uncarefully cyanophilous airfreighter +manilla returnability pleasurehood columniform uncontradictableness +uncarefully aconitine outhue rotular intuition signifier Confervales cartful templar +winterproof doina timbermonger benzothiofuran zenick chacona steprelationship ascitic hypochondriacism +hoove porriginous undecorated daytime galbulus hypochondriacism cinque redescend extraorganismal +erythremia snare ungouged diplomatize charioteer regardful aspersor tendomucoid omega +ungreat ramosopalmate spot Sphenodontidae balladmonger perfunctory noreast +slangy gemmeous verbid opacousness mangonism eternal +unstipulated 
nativeness unobservantness pony swoony diplomatize pentagamist +thermanesthesia palaeotheriodont underskin boor posterishness +outhue Glecoma affaite aprosopia angina naprapath quarried downthrust seelful +reformatory tetragynian chargeably gallybeggar undeterring exploiter +ethmopalatal benzothiofuran deaf by dispermy alveolite +liquidity abstractionism lophotrichic trailmaking Endomycetaceae ungrave mustafina brutism vesperal +prepavement verbid unefficient epauliere Triconodonta Yannigan signifier +ungouged cromlech Pishquow spermaphyte Harpa overstaid charioteer +comism inductivity floatability circumzenithal nonpoisonous arrendation +wemless Joachimite mutter pondside antineuritic prescriptible +refasten pyxie havoc rehabilitative canicule Russifier Scorpaenidae unscourged Christianopaganism +stormy umbellic bromic bespin homeotypical posterishness +mustafina lienteria prolificy arteriasis doubtingness +aspersor shibuichi rainproof planispheric pleasurehood +Hysterocarpus bought diopside whittle nonrepetition chronographic Cercosporella +codisjunct diatomaceous relaster antiscolic pompiloid +meriquinoidal infestation stradametrical unrealize saponaceous tartrous approbation infestation +decardinalize diplomatize temporomastoid pondside spookdom toxoplasmosis fetlocked sangaree outguess +scrubbed friarhood cartful benzoperoxide sterilely Tamil squit Ophiosaurus +papery raphis pentosuria gunshop proacquittal mesophyte +hondo transude scapuloradial trophonema bunghole serphoid characinoid +unswanlike Fameuse inductivity columniform subirrigate unchatteled nonuple bugre extraorganismal +limpet Chiasmodontidae valvula pictorially beadroll antiabolitionist +frenal karyological diurnalness beadroll admissory +okonite suspend iniquitously entame pelf +homeotypical molossic molossic rave flatman atlantite nonutilitarian opacousness idiotize +rehabilitative penult familist halloo focaloid jharal Munychian inventurous +uninterpleaded temporomastoid whitlowwort pope imperceptivity wemless warriorwise ambitus quailberry +transcortical insatiately antalgol velaric bacterioblast signifier terrestrially cheesecutter +semantician benzoperoxide bogydom kerykeion fetlocked cockal Sphenodontidae Eleusinian +Savitar preparative Uraniidae scrat slait pachydermatoid friarhood alen +genii bubble unswanlike Inger vinny unlapsing Aplacentalia horsefly +underogating snare blurredness hepatorrhaphy tickleproof supraoesophageal +ascitic bromate unscourged unburnt playfellowship nonpoisonous elastivity larklike +venialness beatable approbation cyanophilous Bassaris saponaceous rosaniline +corbel scapuloradial apopenptic mustafina manilla prezygapophysial +tantivy pneumonalgia arteriasis Spatangoidea ineunt nonmanufacture tartrous +outhue sural biventer boser circumzenithal uninterpleaded lebensraum charioteer +counterappellant frontoorbital infravaginal vitally metastoma omniscribent unchatteled +abusiveness genii waird rechar uncontradictableness symbiogenetically snare +serpentinic weism hypochondriacism subtransverse undercolored tonsure friarhood tetchy +moodishness Auriculariales posterishness pentosuria componental Munnopsidae overstudiousness +fallacious diurnalness discipular airfreighter jirble +beatable cattimandoo arduousness trisilicic trillion twinling erythremia +brutism Munychian quad Bassaris wingable scabbiness chacona friarhood +rave coadvice sombreroed homotransplant splenauxe parodist +antivenin Mycogone palaeotheriodont cumbrousness giantly Bushongo taurocolla Jerusalem +ordinant strammel 
parmelioid paleornithology orthopedical +bozal steprelationship havoc proauction proacquittal chronist heavenful +refasten unsupercilious Sebastian pamphlet unstressedly +moodishness pansophism affaite uniarticular parmelioid allectory erythrodextrin +pyxie templar figureheadship serosanguineous octogynous +placatory dishpan periarthritis scrubbed Semecarpus temporomastoid +unreprimanded pentosuria temporomastoid emir imprescribable unpredict hackneyed uloid +unschematized spookdom pondside Machiavel sertularian reciprocation +reciprocation outwealth besagne hellbender monogoneutic adz aspersor pseudoxanthine emir +angiolymphoma ornithodelphous codisjunct taurocolla zoonitic cretaceous +quailberry venialness Glecoma besagne imprescribable +metastoma hackneyed unanatomized nummi parabolicness elastivity +undiffusive prescriptible Caphtor ungreat thiodiazole +chargeably angiopathy unpeople debellator bot +foursquare testa dehairer electrotechnics calabazilla interruptor vitally bozal +chronist schoolmasterism unburnt trillion becomma +unleavened hepatorrhaphy coldfinch cretaceous sequentially noncrystallized boor +ambitus extraorganismal Hysterocarpus critically prospectiveness byroad +prolificy phytonic sud magnificently autoschediastical insatiately unanatomized +Dawsonia spiranthic hypochondriacism homeotypical psychofugal +Hu diminutively Caphtor halloo stiffish +Caphtor scrubbed propheticism planispheric dinical Ochnaceae cheesecutter +Gothish blurredness danseuse stereotypography arrendation peristeropode lammy +choralcelo spookdom counteralliance byroad Cephalodiscus almud +projecting plugger boser arduousness infrastapedial chrysochrous Hydrangea +overbuilt shallowish osteopaedion Mormyrus outguess stachyuraceous unimmortal uncarefully nonutilitarian +sapphiric galbulus Bushongo piquantness idiotize +manilla seelful aprosopia overcrown calycular Ophiosaurus +phallaceous nigh ethnocracy concretion liberatress warriorwise rehabilitative adz electrotechnics +boser dispermy seeingness sarcologist amylogenesis lithograph bacterioblast rizzomed heavenful +daytime eternal decidable spiciferous docimastical interfraternal admissory +orchiocatabasis marshiness starosta gorilloid counteralliance putative Lincolnlike +uncompromisingness Semecarpus abthainry dehairer astucious cubit corona sesquiquintile zoonitic +pentafid corelysis pyroacetic euphonym chilblain lienteria dunkadoo +pyroacetic embryotic pneumonalgia unburnt mastication stronghearted +abstractionism byroad Hydrangea manganosiderite scapuloradial decardinalize valvulotomy qualminess barkometer +stewardship Isokontae ambitus Babylonism myesthesia uninductive imprescribable unstressedly +regardful unrealize arval depravity gemmeous +alen shallowish dishpan diplomatize insatiately +bogydom laubanite prolificy Jerusalem testa meloplasty +dinical tautness Scanic Chiasmodontidae trunnel epauliere +champer sequentially porriginous porriginous enation glyphography mammonish +parquet stewardship Orbitolina Ophiosaurus meloplasty starer +louse focaloid minniebush imaginary sarcologist exprobratory discipular +spiranthic unevoked Kenipsim rehabilitative unimmortal Eleusinian unpatched expiscate +Endomycetaceae sarcologist sportswomanship frictionlessly rehabilitative Vaishnavism +serpentinic antalgol Munnopsidae bonze angiolymphoma eristically stradametrical +Alethea glacierist seeingness Munnopsidae pterostigma +Bassaris rainproof bettermost diatomaceous Uraniidae neurodegenerative poleax +Savitar meriquinoidal squdge prescriber groundneedle 
idiotize +vinny ell tailoress goladar exprobratory qualminess mechanist +hogmace reperuse astronomize vitally Cimmerianism eurythermal seizing +golem calabazilla migrainoid bathysphere counteractively nonmanufacture pleasurehood knob Semecarpus +glandularly Machiavel prepavement equiconvex prescriber iniquitously +pondside monstership Mesua seelful winterproof +reeveland imprescribable gala sonable figureheadship masa Megaluridae culm +aspersor semantician Dawsonia erlking frictionlessly ungouged fetlocked pachydermous stormy +guitarist sonable trailmaking champer sertularian prescriber balanocele prescriber +comprovincial vesperal bacillite allectory ordinant +tramplike dinical thorite byroad swacking +pentafid beatable Harpa swacking tautness cumbrousness aconitine ultrasystematic inferent +coracomandibular erythremia sportswomanship evictor transudatory stormy sirrah +Muscicapa swangy cornberry stroking blightbird pyroacetic Cephalodiscus naprapath +hemimelus homeotypical stapedius slipped subtransverse mammonish +guanajuatite nectopod sirrah Llandovery eer guanajuatite bought +saccharogenic Fouquieria archesporial quarried counteralliance adatom scotching balladmonger chronographic +gorilloid timbermonger papery elastivity sural stereotypography +cacuminal helpless Bertat untongued astronomize arteriasis proacquittal +farrantly photoelasticity counteralliance saccharogenic focaloid gymnastic characinoid refective balladmonger +pachydermatoid expiscate instructiveness obolus adatom gul chorograph antalgol +sedentariness undinted unefficient disilane eurythermal abusiveness +tetrahedral inventurous biodynamics cresylite interruptedness metaphrastical commotion dispermy +metaphonical prezygapophysial concretion lebensraum bogydom upswell cockstone biopsic sviatonosite +besagne infravaginal overwoven rechar snare +divinator starosta unstressedly upswell Auriculariales cobeliever cuproiodargyrite +liquidity bot vesperal palaeotheriodont stiffish Pyrales charioteer +concretion rotular acidophile lithotresis mesophyte liberatress shellworker overcultured +ethnocracy widdle fetlocked stroking mutter overbuilt +ell infestation times Endomycetaceae unrealize genii sirrah kerykeion +unurban reconciliable slangy unburnt Eryon metapolitics iniquitously serphoid lampyrine +fallacious scabbardless angiolymphoma exploiter omniscribent avengeful +Cercosporella paradisean enterostomy horsefly lampyrine furacious Quakerishly epididymitis +naprapath ventricous omniscribent regardful ipomoein Ophiosaurus apocalypst Machiavel flushgate +golem trillium templar autoschediastical almud catabaptist overstaid Bermudian manny +nummi stormy pumpkinification pompiloid adatom metaphrastical unanatomized insatiately prescriber +archesporial putative underskin uvanite Confervales Spencerism tingly Eryon +trophonema lophotrichic antiscolic unharmed hyocholic ethmopalatal floatability yote +engrain aprosopia sural molecule paradisean wherefrom Pithecolobium +molecule morphiomania dosseret relaster paranephros concretion overbuilt interruptedness +lifter brutism Sphenodontidae Uraniidae unrealize +Whilkut airfreighter antalgol appetible Filipendula comparability +phytonic laubanite Russifier subfebrile subdentate oversand +propodiale sequacity goodwill mutter chargeably +undercolored Macraucheniidae Chiasmodontidae quailberry lampyrine guanajuatite +retinize parastas glandularly tetrahedral noncrystallized skyshine entame +pneumonalgia cobeliever thermoresistant Savitar laubanite pyrocatechol +uvanite karyological 
Dawsonia idiotize trailmaking +columniform erythremia glacierist seeingness taver overwoven sonable +chrysochrous Hysterocarpus sapience refective undangered synovial +afterpressure overcrown nonutilitarian wherefrom thorite +bicorporeal Sebastian Quakerishly glaumrie dehairer +diplomatize comparability metapolitics Megaluridae Bishareen paleornithology Whilkut Bishareen +vinegarish laryngic enterostomy coldfinch zenick +octogynous plerome molossic cinque farrantly warlike +dipsomaniacal squit unbashfulness nonexecutive unfulminated abstractionism eulogization oflete +diminutively sheepskin propheticism basto doina provedore zoonitic componental metaphrastical +planosubulate spookdom coldfinch sapphiric autobiographist temporomastoid +tendomucoid seeingness frictionlessly mutter Ophiosaurus scotale +undinted rosaniline heliocentricism generalizable alveolite overwoven gallybeggar anta angina +pseudohalogen exprobratory seditious biopsic sheepskin unexplicit +hymnic packsack balanocele diathermacy ten +coracomandibular reperuse Pithecolobium nonuple Mesua +beatable tendomucoid paleornithology stentorophonic japanned predebit Vichyite +arduousness edificator havoc cattimandoo overinstruct templar tickleproof nummi +cattimandoo subfoliar erlking angina hypochondriacism alveolite Cimmerianism +repealableness hymnic Dawsonia quad Savitar Triconodonta +Aktistetae Spencerism cubby parmelioid frameable +magnificently Dadaism ladhood plugger starosta unrevolting preparative classificational +Uraniidae ticktick perculsive edificator immatchable +mericarp semiangle doina brag perculsive botchedly flippantness introducer +liquidity sarcologist Tamil charioteer cubby slipped +ferrogoslarite overcultured pterostigma ununiformly familist tricae antiadiaphorist thermoresistant antivenin +venialness merciful diurnalness eucalypteol biodynamics +unbashfulness Dawsonia seelful amylogenesis reciprocation naprapath qualminess +enation divinator kenno erythremia lammy +tambo commandingness scotching saccharogenic nebular aspersor pinulus topline goladar +Vaishnavism slait expiscate hymnic flatman saponaceous verbid naprapath molecule +magnificently docimastical visceral flutist subtransverse Scanic undercolored +gul massedly neuromimesis classificational equiconvex nonlustrous eucalypteol +synovial redecrease commandingness gala regardful +quintette marten antideflation flippantness cromlech trophonema Saponaria +instructiveness pneumatotherapy massedly deepmost okonite bacterioblast +tonsure obispo semantician neurodegenerative opacousness engrain pyrocatechol oversand Gilaki +vitally Dodecatheon canicule anta antihero erythrodextrin lebensraum obispo +scyphostoma bucketer glaumrie consumptional beneficent uncompromisingness +lithotresis bathysphere monogoneutic acidophile pamphlet +tramplike coracomandibular trisilicic infestation calabazilla bathysphere arduousness folious cartful +thiodiazole by elastivity seeingness arrendation japanned pompiloid underskin +ununiformly biodynamics arsenide topline sturdied cacuminal +Dunlop benzoperoxide haply enterostomy Tamil dermorhynchous carposporangial taurocolla laryngic +Vichyite pentosuria chrysochrous squdge glandularly photoelasticity reappreciate +mesymnion alen bacterioblast templar interfraternal unfeeble Cercosporella +Munnopsidae agglomeratic proauction undangered mutter pomiferous bathysphere tonsure participatingly +Quakerishly tetrahedral anta laubanite foursquare poleax decardinalize commotion prefatorial +infestation stronghearted focaloid nonuple 
hellbender undeterring ethnocracy isopelletierin smokefarthings +spherulitic interruptor dinical cartful widdle porriginous +dialoguer counteractively sportswomanship uloid oxyterpene Hu lampyrine eristically +agglomeratic pompiloid diatomaceous peptonate bugre +pelf trunnel dermorhynchous unanatomized oversand sirrah valvulotomy unexplicit +superindifference nectopod chasmy clanned trophonema +plugger Mormyrus interruptedness frontoorbital endotheliomyoma +pansophism arval tramplike imperceptivity devilwise angiopathy +trip seraphism uvanite folious tum quarried eurythermal transudatory counterappellant +sialadenitis Endomycetaceae stereotypography Bertat giantly +tricae amplexifoliate asparaginic sawdust Isokontae +scrubbed Bushongo unpremonished bacillite antineuritic parastas gunshop charioteer +participatingly liberatress bicorporeal unefficient seditious massedly gallybeggar goladar peristeropode +liberatress ungreat parabolicness Aplacentalia Mormyrus molossic Passiflorales Lentibulariaceae +enterostomy pelvimetry Munnopsidae uninductive posterishness archididascalian +paunchy choralcelo temporomastoid opacousness oblongly +cacuminal phallaceous uninterpleaded carposporangial aprosopia +jajman omega licitness shallowish diplomatize +Spatangoidea tingly engrain rivethead Bermudian +hypoid flatman phallaceous overcrown sandbox arrowworm cresylite ethnocracy warriorwise +Pishquow ethmopalatal obispo tickleproof paleornithology +signifier charioteer glandularly Saponaria edificator sterilely groundneedle fallacious limpet +mesophyte rainproof scrat tomorrowness whittle squit +seeingness cartful neurotrophic gunshop quadrennial orgiastic calabazilla jharal commandingness +airfreighter edificator packsack precostal thermoresistant Glecoma anta +neuromimesis enterostomy unexplicit merciful monander chacona +boor liquidity elemicin euphemious adatom thermoresistant unrevolting crystallographical +twinling champer corelysis folious cloy +quintette peptonate Shiah Eryon Dodecatheon vesperal Prosobranchiata +speckedness botchedly comparability flippantness bladderwort piquantness ascitic triakistetrahedral +moodishness antineuritic Aktistetae approbation intrabred misthread morphiomania toxoplasmosis antineuritic +hypochondriacism tramplike pamphlet projecting prescriber swoony infestation monander meriquinoidal +unfurbelowed antalgol Ochnaceae comparability marten Confervales coadvice cornberry thermoresistant +botchedly manilla sirrah seminonflammable michigan +chronographic ungreat Vaishnavism outguess propodiale rebilling tantivy +arteriasis chronographic meloplasty isopelletierin sloped +golem pachydermatoid bromic homeotypical Munychian +uninductive spot depthwise catabaptist Munychian asparaginic +coldfinch unharmed periclitation whittle experientialist sequacity depravity parmelioid Edo +besagne Savitar superindifference ununiformly shola decidable hysterolysis neuromimesis preparative +divinator Hester adatom Russifier nonmanufacture +veterinarian erythrodextrin Jerusalem selectivity drome propheticism chordacentrum noreast +propheticism dipsomaniacal weism magnificently beneficent bromate acocotl +japanned temporomastoid genii supermarket excerpt feasibleness sud packsack +quarried proauction Prosobranchiata chooser unreprimanded abusiveness transcortical +periclitation sequentially vinegarish collegian upcushion +approbation glyphography Spatangoidea ultrasystematic interfraternal proauction bubble experientialist +semantician mammonish cacuminal morphiomania silverhead widdle 
Scorpaenidae unpeople metopon +Swaziland helpless soorkee dunkadoo eucalypteol spermaphyte +pyrocatechol parodist epauliere mediateness feasibleness folious autobiographist +tantivy cinque ell pope rebilling impugnation +basto Consolamentum gelatinousness incalculable porriginous +dehairer symbiogenetically refective unburnt unforkedness rainproof spiciferous planosubulate hemimelus +Mormyrus lammy redescend schoolmasterism hyocholic dinical euphemious tricae goladar +asparaginic bacillite preaffiliate tetchy minniebush preparative euphonym erythrodextrin opacousness +wherefrom sequacity balladmonger evictor ovopyriform byroad +goodwill nonrepetition arval trophonema unsupercilious Quakerishly antiadiaphorist bubble deindividualization +allectory cuproiodargyrite thiodiazole amender mesophyte Auriculariales rechar reappreciate +Helvidian yeat unreprimanded sloped cartful alveolite gorilloid +dispermy Hester detractive introducer naught tristich spermaphyte +splenauxe Shiah interruptor seeingness shibuichi daytime +superindifference ultratense hyocholic undecorated Florissant +by flatman subirrigate superindifference raphis appetible nonmanufacture Dunlop cromlech +angina Lemuridae astucious arteriasis lyrebird +synovial Isokontae yawler dishpan timbermonger +misexposition heavenful uloid bicorporeal ell potentness widdle venialness +pinulus thermanesthesia trunnel spiranthic enterostomy gunshop isopelletierin +hepatorrhaphy ineunt tricae euphemize strander +magnetooptics archididascalian Isokontae stormy gul +semiangle octogynous admissory antiscolic stentorophonic orchiocatabasis Orbitolina champer +oblongly trabecular selectivity swacking cartful steprelationship +circumzenithal bucketer Savitar chargeably sarcologist constitutor idiotize epidymides +doina nonutilitarian abusiveness arduousness spiranthic Edo monogoneutic +beadroll monstership ladhood depthwise autobiographist incalculable antivenin raphis flippantness +spherulitic preaffiliate cyanophilous cobeliever adz phytonic floatability +slait alen antivenin neurodegenerative chasmy +quailberry parodist Muscicapa shellworker sural outwealth schoolmasterism sombreroed nonexecutive +yeat meriquinoidal Zuludom autoschediastical Christianopaganism +bettermost neurotrophic schoolmasterism arteriasis bladderwort +mesymnion componental incomprehensible transcortical patroller +noreast swoony diwata acocotl cobeliever preagitate ten +masa eucalypteol rivethead redesertion Mesua Bertat Animalivora figured +starosta arsenide Dodecatheon inertly uninductive besagne nonsuppressed trabecular Megaluridae +sural unaccessible sirrah sleigher peristeropode posttraumatic eucalypteol neurodegenerative silverhead +Cephalodiscus pachydermous sonable bubble pterostigma minniebush +aconitine squdge oxyterpene unanatomized participatingly sleigher seditious transudatory Dunlop +instructiveness toplike balanocele enhedge focaloid trip adz sturdied +Dodecatheon diurnalness ultratense biodynamics bestill hypochondriacism cretaceous saccharogenic +entame Protestantize planispheric japanned undercolored +Ludgatian arrowworm starosta imprescribable nonrepetition cartful roughcast choralcelo +ten Orbitolina percent sterilely limpet +tramplike omniscribent phallaceous Dodecatheon arduousness trophonema eulogization unswanlike chronist +Yannigan adz ultrasystematic Auriculariales unevoked agglomeratic stormy +ultrasystematic expiscate massedly phallaceous valvula +ell redesertion reconciliable unfurbelowed serosanguineous +debromination wingable homotransplant 
genii volcano bonze entame Kenipsim +oinomancy ambitus preoral blurredness ovoviviparous +sapphiric Savitar boser engrain synovial ploration counteralliance dispermy +bought unachievable zanyism oblongly antiabolitionist +charioteer lophotrichic inductivity adz airfreighter unanatomized edificator +comism beneficent dispermy physiologian sawdust Oryzorictinae cyanoguanidine cumbrousness Zuludom +serpentinic coldfinch supermarket heliocentricism pinulus cretaceous analgize Dadaism +michigan technopsychology guanajuatite antiadiaphorist pneumatotherapy monogoneutic astronomize doubtingness stormy +putative prospectiveness qualminess absvolt stachyuraceous planosubulate botchedly stroking absvolt +prospectiveness manny tickleproof potentness antihero parquet triradiated reciprocation bespin +psychofugal sturdied bacterioblast unschematized cyanoguanidine +hellbender unlapsing massedly cubby adz +papery depressingly unfurbelowed Zuludom impressor floatability +yawler pompiloid autobiographist licitness besagne +subsequentially underskin metastoma preparative instructiveness Mycogone hellbender migrainoid redecrease +Macraucheniidae blightbird saguran lineamental magnificently gallybeggar glandularly expiscate +breadwinner fossilism consumptional trabecular abscission +depressingly unstipulated masa gemmeous antihero vitally rotular floatability Alethea +downthrust ramosopalmate inventurous minniebush spiciferous counteralliance overstaid +templar dishpan signifier interfraternal porencephalous unscourged precostal rave +taurocolla thorite undecorated Semecarpus seminonflammable balanocele +umangite Pishquow pseudoxanthine provedore rivethead +Joachimite Cimmerianism boser euphemize glandularly propodiale +volcano uncompromisingness nonpoisonous scotching overbuilt hemimelus mutter +outhue ramosopalmate mammonish beadroll antivenin Macraucheniidae pneumatotherapy +almud subdentate eristically dehairer unstipulated +apocalypst paranephros ethnocracy venialness sertularian arval seraphism +regardful lineamental chooser paleornithology undecorated +frameable magnificently ultratense experientialist dialoguer pseudoxanthine undeterring thiodiazole tambo +unprovided seminonflammable antiscolic allotropic chalcites hypochondriacism +benzothiofuran wemless sangaree proacquittal diatomaceous Uraniidae brutism thermochemically depthwise +elastivity allegedly saguran unfulminated bicorporeal cockal Tamil nonmanufacture outhue +nonlustrous countergabion Eryon discipular dispermy sturdied lineamental +mediateness Italical inductivity unscourged Dawsonia characinoid archididascalian +sturdied venialness vinegarish qualminess glaumrie unstipulated visceral skyshine Swaziland +abstractionism chronist whitlowwort preparative kenno angina stapedius yawler dehairer +bromate spiranthic shellworker Caphtor golem +gallybeggar sonable plugger pompiloid splenauxe calycular +Llandovery Dawsonia reperuse ventricous trillion stroking +bismuthiferous elemicin canicule weism unpatched Triphora +slangy preoral epauliere infravaginal benzoperoxide +ploration allegedly metoxazine preaffiliate scabbardless euphemize sterilely subsequentially +widdle pleurotropous eurythermal Joachimite Caphtor isopelletierin toxihaemia Homoiousian nonpoisonous +monander redescend projecting steprelationship Cimmerianism mediateness Megaluridae +outwealth sloped galbulus figured oratorize +perfunctory periclitation helminthagogic bettermost brag unurban pyrocatechol eristically +galbulus nigh cartful quad ploration bozal posttraumatic drome 
ovopyriform +cobeliever antineuritic Italical angina beatable mammonish sirrah bogydom +monander instructiveness quintette piquantness interruptor overstudiousness +prezygapophysial unfeeble nectopod mendacity signifier periclitation ovoviviparous angiopathy shibuichi +Lentibulariaceae pompiloid penult quad coadvice +epidymides devilwise Bishareen leucophoenicite astronomize mericarp docimastical +electrotechnics unburnt nonrepetition moodishness Macraucheniidae +sud testa hellbender molecule uninterpleaded bestill engrain downthrust +stachyuraceous brag cubit retinize oxyterpene propodiale sedentariness +soorkee supermarket basto seraphism goodwill nonlustrous +sural interruptor aneurism dastardliness corelysis nonlustrous +subfebrile chargeably lifter enhedge antivenin acidophile monander parodist +raphis thermoresistant Gilaki Kenipsim coadvice Harpa chalcites tetchy tetchy +reciprocation euphemize dipsomaniacal Haversian Muscicapa cresylite +lithograph pompiloid hellbender selectivity terrificness unbashfulness knob predebit +euphemize parabolicness venialness Semecarpus mangonism +patroller ineunt becomma Italical pachydermous angiolymphoma laubanite +planispheric horsefly prescriptible undangered biopsic lithotresis +immatchable sloped oratorize serpentinic pictorially inventurous returnability +diwata propodiale allegedly metopon tum diminutively valvula outhue trunnel +totaquina nonexecutive terrestrially sialadenitis unanatomized bacterioblast impressor +Whilkut approbation scyphostoma commotion provedore +pendulant thermanesthesia doubtingness flushgate Cercosporella trunnel triakistetrahedral nigh +unbashfulness debellator abscission transudatory inductivity angiolymphoma pseudoxanthine +besagne sertularian ticktick papery bettermost dosseret potentness +ribaldrous Machiavel thermanesthesia arsenide volcano scyphostoma foursquare lammy +upswell umangite proboscidiform aneurism veterinarian impressor agglomeratic nonuple templar +Prosobranchiata depressingly stachyuraceous Quakerishly arduousness rainproof brooky +Saponaria nonpoisonous scotching underogating shallowish +toxoplasmosis orchiocatabasis feasibleness toxoplasmosis unsupercilious unpeople abscission toxihaemia refasten +terrificness sangaree hypoplastral theologal seizing Helvidian umangite +ovoviviparous umbellic sirrah imprescribable wandoo +Confervales oratorship Inger adz brag unscourged benzothiofuran spookdom dialoguer +Macraucheniidae impugnation thermochemically Itea unsupercilious repealableness familist +guanajuatite playfellowship lifter metapolitics deaf unobservantness +interfraternal neuromimesis unpremonished autoschediastical archididascalian transude calabazilla +autoschediastical verbid Consolamentum exprobratory Italical amylogenesis brutism +morphiomania migrainoid benzothiofuran japanned frenal shola cumbrousness +brutism downthrust oratorship pyroacetic Passiflorales chorograph poleax acocotl +whitlowwort Uraniidae afterpressure allotropic piquantness +Orbitolina strander Auriculariales tetrahedral euphemize Cercosporella percent +breadwinner reconciliable neurodegenerative insatiately erythrodextrin metaphrastical commandingness stroking +Eleusinian spookdom generalizable chilblain pterostigma toxihaemia arduousness familist beneficent +ambitus tonsure magnetooptics nativeness tetrahedral ultrasystematic +molecule Mycogone eurythermal Spatangoidea Ochnaceae placatory epididymitis temporomastoid phytonic +rave reperuse unswanlike neurodegenerative analgize clanned carposporangial asparaginic 
antineuritic +genii Jerusalem omniscribent astronomize tum dishpan wemless cockal +theologal euphemize ununiformly undinted inferent +scabbardless thorite Hysterocarpus Dunlop imprescribable +Hysterocarpus trip cartful quintette macropterous benzoperoxide stroking unschematized hondo +throbless nonutilitarian Saponaria beatable undangered Saponaria sonable redecrease +unsupercilious posttraumatic vinny unpredict unrealize +ticktick hypochondriacism infravaginal jharal sialadenitis brooky +ventricous Arneb astronomize outwealth furacious propheticism +scabbiness byroad metrocratic unpeople whitlowwort antihero chronographic +superindifference stewardship eer scabbiness impairment various undeterring Ochnaceae +papery winterproof omega uniarticular instructiveness ethnocracy +frameable bathysphere wemless topsail chacona underskin bacterioblast +Aktistetae Megaluridae cubit plerome Dadaism Coniferae discipular cubit +phallaceous serpentinic euphonym corbel bogydom peptonate Triphora unrealize schoolmasterism +pentagamist noreast subtransverse precostal helpless +pyxie unschematized blurredness chorograph metastoma +pentosuria pumpkinification enhedge isopelletierin sapphiric +transude plugger jirble glacierist stachyuraceous thermanesthesia subdrainage +mammonish downthrust helminthagogic yeat reappreciate bromate steprelationship toplike +Zuludom valvula abscission monstership trisilicic gymnastic +jajman upswell umbellic wemless cheesecutter socioromantic Mormyrus monilioid brag +okonite snare splenauxe Whilkut bought +subdrainage diurnalness astucious prefatorial mesophyte paradisean hypoid dehairer +laubanite reconciliable nonrepetition homeotypical proauction quad +various dialoguer sequentially michigan reperuse +botchedly Homoiousian Spatangoidea inventurous oblongly louse +enterostomy hogmace canicule Shiah prezygapophysial adatom infestation +laurinoxylon Lentibulariaceae metrocratic cretaceous uncontradictableness chargeably schoolmasterism returnability +japanned waird Inger analgic precostal chargeably +canicule concretion appetible subtransverse bunghole +Pithecolobium almud Edo reappreciate undecorated homeotypical trunnel diopside +volcano pachydermous masa umbellic pleurotropous +frictionlessly supraoesophageal Dadaism pompiloid helpless ferrogoslarite sawdust dispermy +generalizable saponaceous terrificness antihero laubanite mesophyte arrowworm spot +pterostigma merciful focaloid liberatress templar rede slangy prospectiveness snare +splenauxe Sphenodontidae periarthritis foursquare trip mesymnion arsenide plerome tetragynian +bugre splenauxe airfreighter classificational ladhood sud +Sphenodontidae admissory figureheadship cromlech antiabolitionist charioteer reciprocation Chiasmodontidae interfraternal +orchiocatabasis pendulant suspend predisputant stachyuraceous wandoo Hydrangea +Llandovery tambo ungreat unefficient enterostomy bicorporeal constitutor rosaniline +wandoo emir refasten unharmed antideflation antiscolic testa trophonema +ovopyriform airfreighter cornberry concretion Fouquieria reconciliable exprobratory triradiated returnability +approbation doubtingness reperuse allotropic alen nativeness venialness +strander canicule chordacentrum bogydom deepmost verbid Protestantize gunshop kenno +ovopyriform Cephalodiscus undeterring opacousness gelatinousness ventricous defensibly +stereotypography pope monander amplexifoliate fallacious lienteria orgiastic canicule analgize +swacking parmelioid nonsuppressed arteriasis endotheliomyoma +deindividualization idiotize 
elemicin neuromimesis devilwise +elemicin imaginary bicorporeal mesymnion percent +columniform digitule Effie rosaniline chargeably rave Savitar posttraumatic +ticktick testa Cercosporella eucalypteol visceral Gothish ribaldrous selectivity +laryngic upcushion supermarket wherefrom ell guanajuatite +sapphiric nonuple chasmy penult Munnopsidae incomprehensible larklike +soorkee infravaginal antiadiaphorist Hu reperuse endotheliomyoma neurotrophic cornberry amplexifoliate +unpredict deepmost pentagamist rotular abstractionism spiciferous phlogisticate oinomancy +cuproiodargyrite electrotechnics slait unfurbelowed evictor squit Llandovery +archididascalian mechanist starer subangulated Shiah cocksuredom alen +aquiline hogmace pyrocatechol subofficer rede shallowish hogmace groundneedle +upcushion Animalivora ramosopalmate volcano obispo +depthwise nonmanufacture cockal gorilloid Confervales uncompromisingness semiangle breadwinner +molecule bestill symbiogenetically tricae ethmopalatal archididascalian nonmanufacture micromembrane stormy +liquidity psychofugal culm inferent spherulitic oblongly obispo bogydom +rebilling octogynous paunchy bucketer jirble oxyterpene massedly aurothiosulphuric +divinator mustafina reciprocation interruptedness toplike +Alethea swacking omega cockal aquiline downthrust Fouquieria +precostal choralcelo Homoiousian angiolymphoma chasmy farrantly Oryzorictinae +bubble tambo cobeliever chooser upswell chronist metopon +shallowish stradametrical stewardship undercolored uvanite heavenful besagne starer leucophoenicite +seizing breadwinner Mesua manny penult neuromimesis meriquinoidal pompiloid +shola coadvice Macraucheniidae eristically cretaceous amplexifoliate trunnel noncrystallized +trillion sombreroed adz osteopaedion licitness suspend countergabion +unharmed signifier whitlowwort Llandovery hymnic centrifugalization unswanlike planispheric parmelioid +paranephros ribaldrous proacquittal monilioid lithograph beneficent rebilling benzothiofuran rebilling +psychofugal skyshine naught Shiah unfulminated unbashfulness +kerykeion pachydermous rave antiabolitionist doina tristich unchatteled +appetible uncontradictableness prepavement inductivity euphemious pyxie peptonate +osteopaedion leucophoenicite propodiale unharmed preaffiliate stradametrical lammy +misexposition tendomucoid seminonflammable pelvimetry monstership carposporangial pneumatotherapy +bubble quad sleigher allectory lithograph +photoelasticity uniarticular bismuthiferous Ophiosaurus undiffusive affaite dialoguer biventer engrain +shallowish yote zenick Saponaria Bishareen defensibly +pinulus pumpkinification nummi eulogization doubtingness unlapsing Whilkut refasten +figured endotheliomyoma unurban exprobratory infravaginal +theologal Hester Filipendula unreprimanded refective smokefarthings Ghent +flushgate knob tantivy counteractively Mesua glyphography times unpredict reformatory +heliocentricism dermorhynchous hysterolysis Alethea outhue atlantite undecorated frameable impugnation +Fameuse Hu insatiately uninterpleaded dosseret Lincolnlike euphemize imperceptivity +angiopathy Megaluridae metaphonical yote Hysterocarpus decidable wherefrom +bugre inertly templar yeat fallacious seizing +stachyuraceous sandbox subangulated peptonate figureheadship oratorship signifier +stronghearted cornberry sedentariness unaccessible Bishareen +quad calabazilla steprelationship biodynamics dunkadoo Cimmerianism counteractively molossic folious +mericarp ploration limpet becomma posterishness inertly +Bassaris 
socioromantic ten amplexifoliate Bassaris affaite dithery +by tramplike figured ambitus parquet totaquina evictor zoonitic +seeingness Homoiousian extraorganismal sequacity metapolitics scrubbed +Dictograph defensibly patroller umbellic apocalypst sonable unlapsing metrocratic codisjunct +starosta extraorganismal rizzomed cervisial uncarefully schoolmasterism transudatory +affaite havoc dunkadoo Semecarpus monander Caphtor beneficent deaf orchiocatabasis +mustafina fetlocked Hester rainproof Bermudian pneumatotherapy monander potentness +Isokontae opacousness ungreat overbuilt socioromantic wherefrom astucious +pictorially sapience sapience steprelationship Dadaism flatman Munychian +champer pneumatotherapy phytoma proacquittal impressor schoolmasterism +elemicin semantician comprovincial unefficient Prosobranchiata +carposporangial Dawsonia glossing laubanite Ludgatian feasibleness iniquitously ploration sturdied +focaloid golem oblongly brutism Ludgatian defensibly uncarefully Hydrangea +groundneedle phlogisticate Dunlop goodwill theologal ethnocracy elastivity physiologian greave +nummi Jerusalem blurredness mustafina sequestrum sertularian wemless +acidophile entame mechanist nonlustrous glandularly testa orthopedical +brutism pyroacetic returnability jirble balanocele dithery quailberry columniform tautness +inventurous oratorize benthonic unobservantness theologal +rivethead detractive ipomoein michigan stiffish putative michigan +outguess goodwill Italical interfraternal widdle Ghent afterpressure migrainoid +pseudoxanthine flatman hondo acidophile testa spot edificator chasmy +liquidity familist antihero parastas monander selectivity prolificy +frontoorbital Triconodonta generalizable leucophoenicite unswanlike Saponaria adscendent perculsive +zoonitic chilblain trip thiodiazole corelysis +unforkedness cornberry metaphonical alveolite mutter seraphism +seizing twinling affaite lineamental lebensraum halloo suspend cresylite +omega nebular pachydermatoid unlapsing bozal floatability Caphtor +docimastical glossing downthrust trip totaquina chalcites +mechanist scrubbed parmelioid cartful yawler +tomorrowness nonprofession obolus redescend tingly Bushongo glossing +pneumatotherapy Jerusalem orthopedical Bushongo incomprehensible antideflation +redesertion pentosuria introducer outguess slait canicule naprapath apocalypst counteractively +endotheliomyoma arrendation proauction isopelletierin brag uncarefully unswanlike allotropic precostal +propodiale spot inductivity intuition unpatched circular +sural codisjunct beneficent leucophoenicite hackneyed goladar +neurodegenerative Homoiousian Russifier Scorpaenidae Chiasmodontidae naprapath excerpt +inexistency swangy Aplacentalia cockal selectivity saguran +dipsomaniacal venialness Munnopsidae packsack Helvidian molossic splenauxe +horsefly sombreroed unreprimanded trabecular benzoperoxide antideflation groundneedle Consolamentum underskin +Florissant tricae genii comparability seditious steprelationship acocotl Babylonism +heliocentricism cheesecutter erythrodextrin Consolamentum unpatched roughcast +inductivity umangite neuromimesis starer Consolamentum appetible classificational gul +vesperal poleax exprobratory havoc vitally plugger +allegedly undangered tomorrowness enhedge uncompromisingly uncompromisingness Sebastian +sviatonosite Consolamentum mesymnion silicize impugnation +karyological trisilicic swacking sonable comprovincial Haversian biodynamics +jharal swacking undercolored widdle Cephalodiscus authorling +pyrocatechol 
cartful charioteer monilioid omega +starer Pyrales cheesecutter amender Ophiosaurus +subdentate unpredict balladmonger merciful hypochondriacism +deaf playfellowship bunghole apopenptic codisjunct +haply participatingly poleax carposporangial nectopod +bromic nectopod toxihaemia mendacity unpeople shola +Triphora slipped genii serpentinic toxihaemia wherefrom naprapath liquidity +absvolt prefatorial bestill mendacity parodist +entame winterproof socioromantic antiabolitionist characinoid +venialness chalcites unpeople Muscicapa unanatomized Confervales +scrat Lincolnlike Endomycetaceae umbellic whitlowwort rosaniline +uninhabitedness elastivity unimmortal toplike Hu botchedly rehabilitative infestation marshiness +gorilloid goodwill phytoma trisilicic figureheadship Gilaki porriginous glandularly aprosopia +transudatory reformatory topsail cubit Dawsonia misthread Endomycetaceae concretion +furacious Effie starer technopsychology cyanoguanidine rehabilitative interruptor mericarp preoral +sapience commotion mastication precostal preaffiliate Lentibulariaceae phoenicochroite +pachydermous bromic leucophoenicite shibuichi Dunlop diplomatize +Endomycetaceae ovoviviparous appetible balladmonger euphemious +unimmortal prezygapophysial craglike pelf kenno speckedness +hypochondriacism boor antineuritic tambo elemicin unpatched omniscribent +diminutively saponaceous unscourged steprelationship twinling molossic Vichyite bonze counteralliance +infestation angina raphis Pincian whitlowwort hackneyed masa cocksuredom +sonable pterostigma idiotize Mesua orchiocatabasis Aktistetae nonrepetition oxyterpene jajman +rehabilitative bozal culm terrestrially shellworker depthwise euphemious hypochondriacism +bonze gymnastic naught emir tickleproof Alethea dosseret +vesperal synovial tartrous chorograph Triphora Prosobranchiata swearingly +stewardship ultrasystematic Lemuridae unrepealably valvulotomy amylogenesis semantician +hellbender Sphenodontidae champer stroking unexplicit marten hemimelus +Bertat unpatched gemmeous schoolmasterism charioteer yote ineunt prepavement +Jerusalem manilla bromate antihero Gilaki provedore stiffish +Munychian collegian Gilaki pope havoc fetlocked Lincolnlike overwoven +plerome ultratense bozal transcorporeal extraorganismal chargeably downthrust Fouquieria Gothish +cervisial pterostigma tantivy Tamil soorkee +guitarist eurythermal antalgol deaf characinoid periclitation helpless nonuple +benthonic euphonym Edo spiciferous undinted toxihaemia prolificy +rosaniline Whilkut patroller steprelationship taurocolla molossic +yeat abstractionism unrealize Saponaria prescriber daytime glaumrie pictorially +limpet ventricous selectivity Dodecatheon subangulated +participatingly yawler erythremia Florissant unrepealably parodist depravity +upcushion manganosiderite overstaid adatom inductivity reconciliable elastivity +debellator rosaniline isopelletierin sangaree papery Shiah mutter isopelletierin +imperceptivity angina angiolymphoma twinling pyroacetic rebilling upcushion Prosobranchiata +sesquiquintile ungouged Muscicapa refasten imprescribable archistome +brooky saponaceous almud constitutor unprovided +Serrifera aprosopia poleax refasten unfulminated bestill precostal +ovopyriform Eryon oversand mesymnion incalculable untongued +figureheadship stroking flippantness trillion unfurbelowed dishpan micromembrane +mustafina golem exprobratory unfurbelowed unbashfulness psychofugal +reperuse planosubulate pyrocatechol shibuichi uninhabitedness +knob balladmonger docimastical zenick 
wherefrom winterproof +nonrepetition Spencerism Endomycetaceae sequentially transude aspersor +lienteria deaf gala erythremia impressor flatman unpatched papery arval +Hester trillium sviatonosite wemless pyrocatechol diopside cubit +wingable antiabolitionist diopside trailmaking Bulanda Tamil experientialist tomorn cacuminal +Alethea Pishquow precostal hyocholic adscendent okonite Cercosporella +Confervales undinted aurothiosulphuric placatory cockstone laurinoxylon semiangle lophotrichic strander +subdrainage noncrystallized manganosiderite infestation inertly stapedius charioteer metopon pelf +cyanophilous drome parodist eer bettermost Quakerishly amylogenesis subfebrile +prospectiveness peptonate Russifier pomiferous posterishness palaeotheriodont preparative louse +classificational Chiasmodontidae seditious pyroacetic Prosobranchiata +figured alveolite subdentate sombreroed impressor spookdom nigh +Dadaism sandbox archididascalian deindividualization subdentate +pneumonalgia infravaginal taver ununiformly Confervales clanned prepavement eurythermal +Inger Socraticism paradisean depravity diopside pompiloid upcushion overcontribute +calycular orgiastic helpless apopenptic unanatomized coracomandibular beatable +morphiomania nonprofession unrealize Orbitolina marshiness Uraniidae trip theologicopolitical pachydermous +cockstone antihero mammonish arrowworm Socraticism affaite +boor sural embryotic oratorize perfunctory codisjunct apopenptic Saponaria +anta japanned counteractively decidable unscourged cubby dithery stronghearted +oinomancy fallacious rivethead magnetooptics chargeably lienteria hoove vesperal elemicin +detractive unsupercilious trunnel Chiasmodontidae farrantly inertly frenal metapolitics +serosanguineous subtransverse photoelasticity foursquare Socraticism +nonutilitarian nonutilitarian pendulant rizzomed metaphrastical Serrifera Coniferae +mesophyte undeterring Mormyrus Hester chalcites +thermanesthesia unbashfulness poleax greave erythrodextrin +undiffusive ordinant paradisean exploiter phytoma bettermost nonprofession unstressedly Dodecatheon +chilblain hogmace uloid projecting jirble +afterpressure mediateness metrocratic bugre Ghent +outhue disilane focaloid verbid tautness Itea overinstruct inductivity figured +strander Mormyrus coldfinch magnetooptics dithery +metaphrastical adatom photoelasticity cubit homotransplant nonlustrous monilioid figured +plerome perculsive Vichyite bugre inventurous bespin countergabion Aplacentalia poleax +Thraupidae triradiated Protestantize smokefarthings dialoguer antiadiaphorist +euphemious deaf bonze diminutively danseuse +undeterring whittle Munnopsidae plerome slait unreprimanded +reeveland thermoresistant epauliere sapphiric paunchy oinomancy relaster +dispermy chooser periclitation figureheadship benthonic abstractionism monogoneutic peptonate chronographic +wingable refective subangulated yawler propodiale +seizing imperceptivity uncarefully uninterpleaded cockal tingly underskin perfunctory aconitine +topsail suspend limpet bought saponaceous Ophiosaurus +laryngic hypoid unevoked interruptedness sandbox orchiocatabasis ovopyriform +bromic unisexuality zanyism Spencerism unaccessible balladmonger benthonic allotropic +erlking antiabolitionist inductivity temporomastoid introducer swangy volcano Dictograph inexistency +arrowworm beneficent erythremia deindividualization metastoma larklike naught refective +venialness Spencerism aconitine blightbird volcano slangy +alveolite tetragynian havoc Chiasmodontidae ordinant 
unefficient cattimandoo pyroacetic +naprapath Whilkut leucophoenicite Homoiousian cocksuredom insatiately +hypochondriacism molecule Swaziland toplike stiffish bucketer angina dispermy chooser +decidable papery Pyrales elastivity ungreat Hester angiolymphoma devilwise alveolite +epauliere lebensraum tendomucoid stewardship eer vesperal Munnopsidae Munnopsidae splenauxe +alveolite charioteer cubby templar scabbardless +atlantite adz laurinoxylon sportswomanship strander +enterostomy brooky mastication cloy refective squit frenal frictionlessly +nigh sarcologist cockal unevoked digitule acocotl thermoresistant impugnation +karyological gul eucalypteol periarthritis cubit quadrennial +Saponaria toxoplasmosis elemicin gul centrifugalization discipular spiciferous +redesertion downthrust tonsure adatom swacking terrificness +preoral rebilling archididascalian rosaniline sud +morphiomania metopon transude Swaziland sandbox semiangle unreprimanded +Spencerism catabaptist arrendation doina metapolitics aprosopia underskin soorkee +Machiavel digitule Harpa lophotrichic arteriasis photoelasticity amplexifoliate +subfoliar cubby helpless pyxie aconitine involatile subdentate pyroacetic +meriquinoidal Ochnaceae tautness ribaldrous dermorhynchous Lemuridae rechar Mesua +hysterolysis Mycogone temporomastoid downthrust nonlustrous sertularian aurothiosulphuric +comprovincial nonuple comparability ultratense euphonym by dinical unfurbelowed +componental seelful trillion bestill overstaid countergabion +hypoplastral cocksuredom dosseret unburnt reformatory +subsequentially trunnel spot deepmost uninhabitedness debromination Caphtor hypoplastral soorkee +airfreighter ticktick retinize manny twinling +Jerusalem gul sviatonosite peptonate elemicin expiscate decardinalize selectivity +countergabion unpredict gallybeggar debromination giantly +aquiline overinstruct yote engrain inertly gallybeggar antideflation engrain acidophile +amender hepatorrhaphy unanatomized snare unbashfulness drome yeelaman +bathysphere metrocratic wingable outwealth homotransplant Haversian dastardliness +underskin oratorship antiadiaphorist silverhead chooser misexposition misthread diwata +goladar gallybeggar meloplasty pyrocatechol pinulus topline rehabilitative volcano haply +penult lebensraum nigh trabecular Fouquieria osteopaedion skyshine pleurotropous +velaric unfurbelowed corelysis Helvidian Homoiousian infestation +cresylite glaumrie Consolamentum infrastapedial ventricous comism hyocholic +Triconodonta lebensraum suspend ambitus Machiavel electrotechnics +nonsuppressed stewardship pompiloid phallaceous totaquina +untongued analgize folious stroking thermanesthesia Effie unpremonished yote +abusiveness skyshine biodynamics tomorrowness orthopedical ordinant ungouged +critically leucophoenicite volcano placatory stiffish lammy nummi Whilkut umangite +cacuminal Hu intuition infrastapedial chronographic nectopod +craglike ell champer signifier overbuilt +Caphtor Tamil plugger various Coniferae kenno outwealth planispheric uncompromisingness +tingly stormy oblongly abscission culm epidymides Haversian Semecarpus glandularly +ovoviviparous licitness pompiloid preoral codisjunct +cuproiodargyrite tramplike reappreciate Edo abscission hypoplastral propheticism +cocksuredom antideflation serosanguineous pomiferous Edo centrifugalization exploiter avengeful apopenptic +verbid spiranthic papery neuromimesis myesthesia silicize merciful +lithograph prescriber orchiocatabasis louse unreprimanded +peptonate Cephalodiscus brutism strammel 
overcrown +omega selectivity electrotechnics venialness theologicopolitical Gothish terrificness +subsequentially mediateness verbid synovial bozal +gelatinousness unleavened porriginous diathermacy mustafina infestation +symbiogenetically ununiformly osteopaedion cyanoguanidine redesertion subsequentially +hysterolysis Animalivora countergabion Bishareen quailberry nonprofession massedly cuproiodargyrite +Bertat hyocholic ethmopalatal omega saguran Orbitolina vesperal unachievable giantly +ten tambo farrantly undangered rivethead umbellic prepavement eristically +nebular sud eer lithograph monogoneutic trillium +atlantite Scorpaenidae harr tristich proboscidiform scabbiness slipped +speckedness hogmace epidymides oinomancy packsack peristeropode generalizable times orthopedical +quailberry countergabion provedore eternal sportswomanship spot pyxie +overstaid craglike zenick allectory jajman serosanguineous defensibly farrantly +unswanlike transudatory returnability stroking quadrennial infrastapedial reformatory +unpremonished Dunlop throbless ipomoein putative +experientialist volcano unbashfulness unswanlike rede expiscate +unstipulated boor aquiline parabolicness emir +transcortical osteopaedion allotropic arval archistome +Llandovery valvula noncrystallized stormy chacona flatman scrubbed +superindifference craglike Eleusinian extraorganismal Llandovery uniarticular metopon larklike nigh +acocotl gorilloid mustafina sviatonosite penult Fameuse +calabazilla velaric antihero tomorrowness uninterpleaded heliocentricism evictor excerpt +tautness liberatress pony nonsuppressed lithograph chronist mesophyte +Bermudian reciprocation karyological antiscolic bought unfurbelowed giantly preaffiliate ununiformly +cretaceous bucketer sequestrum commotion adscendent +jirble counteralliance umangite repealableness mechanist doubtingness bogydom undinted sloped +vinegarish trillium sheepskin elemicin bot uncarefully sportswomanship vitally +frameable ventricous cattimandoo wherefrom regardful smokefarthings ovoviviparous +periarthritis opacousness Kenipsim Fameuse serpentinic +choralcelo yawler pomiferous dispermy bicorporeal harr sleigher +Scorpaenidae counteractively Shiah snare becomma Megaluridae absvolt +spot brag unharmed disilane unpeople Macraucheniidae Quakerishly taurocolla +unpeople lineamental unprovided parmelioid ambitus scrat +excerpt comprovincial unexplicit beatable posttraumatic Harpa +hackneyed familist arduousness Homoiousian visceral depressingly unswanlike comism +chalcites flushgate porencephalous hyocholic returnability +outwealth Macraucheniidae metrocratic ordinant gemmeous +gallybeggar ferrogoslarite preaffiliate goladar supraoesophageal +cornberry bacillite Sebastian putative greave +mesophyte uloid tricae playfellowship ungrave reformatory winterproof Pithecolobium +Lincolnlike semantician Bertat archididascalian farrantly ladhood widdle +botchedly dunkadoo amender sequentially terrestrially experientialist +Ludgatian unschematized pachydermatoid deaf supermarket +coadvice meriquinoidal prefatorial unharmed arteriasis unachievable undinted agglomeratic +Chiasmodontidae commotion infrastapedial overcontribute glaumrie tartrous cinque +naught metoxazine erlking biventer eristically collegian porencephalous benthonic +volcano bot stormy gunshop flushgate cheesecutter excerpt +sheepskin seditious Babylonism hellbender deaf weism lophotrichic +predisputant nativeness dermorhynchous hyocholic subsequentially +papery rehabilitative parastas shola alveolite +unanatomized 
rizzomed predebit saccharogenic unurban +codisjunct diwata cubby bozal doubtingness squdge porriginous +Pishquow figureheadship groundneedle spot prefatorial byroad +dastardliness Arneb chrysochrous various diatomaceous +impugnation arteriasis diatomaceous valvula eristically +thermanesthesia Dunlop zenick topline pterostigma semiangle putative pseudoxanthine +mediateness nectopod stapedius chronist regardful +mesymnion Lentibulariaceae corelysis angiolymphoma myesthesia Ghent barkometer michigan papery +Hydrangea laryngic Bishareen calycular zoonitic trip +scapuloradial incomprehensible sandbox euphemious frenal +boser oblongly Filipendula sertularian tetrahedral +slangy stachyuraceous serosanguineous alveolite pelf Uraniidae chronographic +tautness mutter speckedness daytime oratorize bismuthiferous corelysis +Confervales diplomatize elemicin almud knob +reformatory nonsuppressed neurotrophic technopsychology pinulus sviatonosite silicize plerome +terrestrially magnetooptics thermoresistant Cercosporella trisilicic hyocholic +Russifier flippantness Russifier dithery Ophiosaurus euphonym Pyrales stereotypography +valvulotomy peptonate apocalypst diopside elastivity wandoo monilioid Machiavel sloped +verbid goodwill lyrebird phallaceous Munychian besagne spookdom +unurban roughcast reformatory physiologian wemless +thermanesthesia bathysphere nonlustrous okonite hypochondriacism neuromimesis +hymnic technopsychology guanajuatite omniscribent digitule carposporangial Helvidian uniarticular paranephros +benzoperoxide unevoked decardinalize brutism allegedly +hysterogen spherulitic euphonym hoove knob adatom galbulus +obolus mechanist giantly affaite metopon twinling +thiodiazole unurban valvula seraphism manny havoc silicize pseudoxanthine eer +rosaniline participatingly chasmy ordinant Homoiousian +Dodecatheon unisexuality Pishquow pelf Passiflorales antiabolitionist +supraoesophageal myesthesia roughcast Dawsonia lammy affaite antineuritic +arsenide regardful Gilaki pelvimetry bunghole smokefarthings rechar reconciliable +paradisean unrevolting Gothish erlking masa seelful jharal +paradisean unaccessible Filipendula coldfinch osteopaedion +hepatorrhaphy autoschediastical floatability cyanoguanidine introducer +hondo guanajuatite Triphora uncompromisingness octogynous naprapath osteopaedion +pelvimetry prescriptible untongued ambitus wherefrom tetchy +archesporial amplexifoliate triakistetrahedral wemless depthwise +Harpa ultrasystematic antalgol vitally mutter carposporangial ungrave tomorn +scotale trabecular guitarist nigh thermanesthesia reconciliable +Eryon Bertat stereotypography gelatinousness ungrave proauction snare vinegarish +dunkadoo enterostomy noreast smokefarthings Scorpaenidae greave analgize +totaquina Hester metopon analgize farrantly pamphlet centrifugalization chordacentrum sterilely +sviatonosite imprescribable enhedge preaffiliate euphemious trophonema Munychian glossing +bismuthiferous preparative chargeably comparability cubit Spencerism brag neuromimesis serphoid +octogynous Socraticism lammy docimastical yote galbulus placatory sawdust +refective Edo pneumonalgia oxyterpene divinator unchatteled spookdom hepatorrhaphy serpentinic +mesophyte predisputant opacousness trisilicic predisputant dispermy +transcorporeal insatiately generalizable serosanguineous Saponaria velaric redesertion stapedius tambo +Ludgatian serosanguineous abstractionism whittle ventricous eulogization allectory Mesua osteopaedion +uvanite alen elastivity seelful skyshine ramosopalmate stormy 
[Remainder of diff elided: added lines consist of machine-generated random-word test data (no reviewable content).]
+trophonema tetragynian ladhood sud predisputant posttraumatic +deepmost reciprocation phytonic flushgate gorilloid snare bathysphere +unharmed saguran pseudoxanthine becomma noreast sequentially Munychian danseuse +idiotize sapience charioteer Pyrales calycular isopelletierin depthwise +sportswomanship Munychian phlogisticate greave nonlustrous +unstipulated oflete silicize commandingness doubtingness +slangy embryotic groundneedle supermarket reperuse ungreat adatom +spot downthrust dishpan unstressedly metaphonical +zoonitic ovoviviparous yeelaman undiffusive thorite pendulant Savitar Mycogone +involatile saccharogenic precostal decardinalize antiscolic scabbardless ineunt +crystallographical cockal scabbardless perfunctory topsail symbiogenetically plugger pyxie Jerusalem +sleigher lammy cumbrousness toxoplasmosis vesperal dishpan planosubulate uninductive percent +sedentariness incalculable helpless brooky unrepealably circular +chooser perculsive Hu dipsomaniacal absvolt +scapuloradial undercolored posterishness superindifference ladhood scabbardless winterproof neurodegenerative interruptor +Muscicapa dispermy mangonism pachydermatoid Uraniidae foursquare +outwealth inventurous danseuse pondside temporomastoid temporomastoid pachydermous +pinulus manny unleavened peristeropode silicize silicize +basto interruptedness approbation squdge hymnic +kenno spiciferous Semecarpus preaffiliate allegedly prezygapophysial subfebrile +eucalypteol floatability ultrasystematic groundneedle timbermonger unharmed sequestrum pomiferous +prescriber centrifugalization sequentially packsack larklike experientialist fallacious +concretion seeingness rotular overstudiousness seeingness involatile reformatory beatable +debellator totaquina macropterous upswell beneficent naprapath +laryngic visceral insatiately speckedness enhedge serpentinic raphis +ascitic cretaceous sangaree wandoo embryotic +saguran Quakerishly abusiveness sheepskin erlking +seeingness chilblain diwata meriquinoidal fossilism comprovincial Munnopsidae nonmanufacture Munychian +throbless catabaptist volcano veterinarian starer pyrocatechol rainproof Spatangoidea sapience +preoral manganosiderite critically pentagamist abusiveness valvulotomy valvulotomy lyrebird +diminutively winterproof porencephalous cylindric uvanite theologal subirrigate noreast +Semecarpus unlapsing sertularian gunshop lophotrichic stereotypography vesperal +cretaceous lophotrichic Dodecatheon subdrainage Ochnaceae guitarist +rivethead Animalivora drome Christianopaganism tonsure ununiformly interruptedness +analgize taver rede metrocratic Vaishnavism +weism frenal paunchy misexposition Swaziland tetchy +bacterioblast scapuloradial archididascalian steprelationship flushgate cocksuredom glaumrie +antineuritic sandbox Pishquow tautness aprosopia vinny eternal decidable +yeelaman characinoid calycular bespin Macraucheniidae +biopsic tambo helpless Bishareen raphis hellbender karyological whittle regardful +cartful Quakerishly wemless Bishareen epauliere +cyanoguanidine frictionlessly unrevolting Ochnaceae ladhood subfoliar speckedness +Protestantize Fouquieria incomprehensible Bishareen bugre by pterostigma monander arduousness +vesperal mechanist Inger allotropic phytoma unbashfulness snare +galbulus coracomandibular pictorially steprelationship farrantly glossing bladderwort +antiscolic discipular phallaceous sloped undecorated oversand rehabilitative trophonema +Scanic eurythermal peristeropode barkometer cockal +overcontribute Ghent pamphlet goladar wherefrom 
parodist unefficient nonuple marten +diplomatize chasmy splenauxe topline poleax eulogization Caphtor deindividualization tetchy +opacousness genii Mycogone merciful introducer +depressingly Scanic involatile Vaishnavism louse tetrahedral +bladderwort Italical superindifference Pithecolobium stronghearted antiadiaphorist prescriber imperceptivity +quadrennial columniform returnability japanned hogmace phoenicochroite antineuritic paradisean +valvula Sphenodontidae amender squdge halloo palaeotheriodont +preparative Ghent pondside uloid Joachimite +ununiformly monilioid qualminess overbuilt spookdom precostal sarcologist Itea orthopedical +engrain Aktistetae comparability comprovincial electrotechnics acocotl +bunghole drome gemmeous trillium Uraniidae autobiographist transudatory Munychian +sangaree tomorn untongued liberatress trunnel pansophism rosaniline +ornithodelphous smokefarthings patroller transcortical subfoliar choralcelo projecting nonrepetition +glaumrie schoolmasterism immatchable calabazilla papery +Megaluridae aquiline rebilling deepmost counteralliance scrubbed flippantness slangy +prepavement cartful bromic upcushion pinulus +Caphtor seminonflammable homeotypical astronomize volcano zanyism jharal +trillion Endomycetaceae mesymnion affaite hellbender thermoresistant +nummi suspend extraorganismal trophonema noreast Haversian overstaid flutist +euphemize suspend phlogisticate tricae Gothish yeelaman angiopathy +plugger jajman engrain hypoplastral generalizable +pictorially ladhood allotropic thermanesthesia abscission meriquinoidal regardful +doubtingness ramosopalmate insatiately homeotypical acocotl +technopsychology balanocele sesquiquintile nonsuppressed leucophoenicite phlogisticate prescriptible theologal +nonprofession frameable speckedness outguess jajman +Jerusalem shallowish pope tomorrowness ferrogoslarite +yawler merciful bicorporeal playfellowship predisputant +interfraternal aneurism laurinoxylon prolificy stradametrical +redescend frenal phytonic bozal starer endotheliomyoma Bulanda +cromlech diopside eulogization theologal basto potentness autoschediastical liquidity culm +parodist sleigher pony toxoplasmosis Savitar Edo by amylogenesis enterostomy +laubanite subfoliar cartful greave Machiavel deepmost pumpkinification +micromembrane molossic genii cockal saccharogenic +embryotic bathysphere hondo dehairer elastivity euphemize +rave crystallographical laurinoxylon molossic Machiavel prescriber +comism relaster entame doina coadvice deepmost sandbox diminutively +cattimandoo cheesecutter rehabilitative culm doina frictionlessly devilwise +transcorporeal euphemious choralcelo prepavement defensibly glandularly venialness outguess +codisjunct oversand authorling uncombable tendomucoid bugre unevoked metoxazine +euphemize harr serphoid saponaceous commotion almud sloped tomorn +mesophyte entame embryotic cuproiodargyrite lammy stereotypography +stronghearted unachievable preparative Serrifera Lincolnlike weism undeterring homeotypical +vinegarish bathysphere Shiah trillion scrubbed +Oryzorictinae thermochemically Dunlop outguess lithograph plugger transcortical cresylite +Inger balanocele excerpt trophonema chronographic rizzomed glacierist naprapath technopsychology +unefficient Bushongo Bermudian retinize various +poleax unachievable pelf sleigher oinomancy pompiloid ungrave ultraobscure arduousness +subangulated marshiness yeat oflete outhue +papery carposporangial tomorn ineunt choralcelo +squit sud Munnopsidae euphonym prezygapophysial abthainry +topsail 
participatingly vitally brooky micromembrane bugre iniquitously reappreciate +soorkee cyanophilous bettermost anta sequacity +scyphostoma flatman barkometer acocotl archistome byroad exprobratory galbulus topline +pleurotropous rosaniline unstressedly lienteria vesperal dishpan chalcites gallybeggar pumpkinification +times nonexecutive critically undiffusive unefficient retinize reciprocation bucketer subangulated +uninhabitedness outhue mechanist uninterpleaded Coniferae generalizable +nectopod gul gul swoony columniform antihero Passiflorales Isokontae +uvanite Pithecolobium downthrust plerome debromination tendomucoid scapuloradial peptonate unchatteled +amender dipsomaniacal Bishareen sapphiric Homoiousian Dadaism flippantness Tsonecan manny +debellator unpredict cubit naught phlogisticate Glecoma deaf signifier +commandingness tingly expiscate airfreighter quailberry astucious +scotale Bushongo dispermy nonsuppressed nonlustrous mastication +gelatinousness jharal tetragynian experientialist Hester repealableness valvulotomy helpless pentosuria +pleurotropous subfoliar Consolamentum ultrasystematic shellworker Glecoma glossing +mesymnion hondo starer uncombable dispermy marshiness toxihaemia pyrocatechol +terrificness misthread piquantness Scanic outguess bacterioblast scyphostoma seminonflammable raphis +harr comprovincial amender participatingly saccharogenic +sheepskin oratorship meloplasty diurnalness gemmeous mammonish +bromic pomiferous Triconodonta electrotechnics stereotypography synovial Harpa intuition +Spencerism scabbardless Yannigan pyxie afterpressure +Dictograph circular metaphonical paunchy folious planosubulate +steprelationship balladmonger Ghent frameable precostal yawler +pleasurehood hogmace oratorize hackneyed nonrepetition +uncompromisingness hymnic topline prepavement folious seeingness monstership +predebit chooser snare Bertat octogynous +Ludgatian subfebrile goladar laurinoxylon commandingness Sphenodontidae hysterogen octogynous +perfunctory Aktistetae danseuse penult codisjunct brag involatile mechanist +preagitate retinize planispheric tantivy trabecular +authorling unstressedly commotion venialness folious adscendent chacona +generalizable shallowish seizing Consolamentum thermanesthesia Confervales twinling +balladmonger uncompromisingly thermochemically unimmortal antiadiaphorist ununiformly besagne archididascalian undeterring +blurredness strander monander unbashfulness eternal +glyphography Jerusalem yote aneurism corbel posttraumatic metapolitics plugger +parodist cartful horsefly Haversian unrepealably Oryzorictinae +brutism potentness phytoma sesquiquintile provedore infrastapedial hysterolysis pleasurehood +bunghole alen jirble epidymides evictor +Hydrangea emir orgiastic sleigher ramosopalmate breadwinner magnetooptics +orchiocatabasis silverhead exprobratory mangonism Machiavel Ophiosaurus +doina afterpressure nativeness dinical swearingly subdrainage silicize edificator ribaldrous +exploiter semantician imprescribable epididymitis subdrainage bromic +unsupercilious overwoven unisexuality Llandovery paunchy bonze uninterpleaded +hondo Itea liquidity goladar rechar quarried digitule frenal +brag flippantness agglomeratic beneficent putative diopside undecorated +antineuritic liberatress mammonish expiscate airfreighter Uraniidae tendomucoid sural +amender unfulminated various archididascalian generalizable phytoma +counteractively periclitation Munychian stentorophonic Machiavel okonite pleurotropous +spermaphyte crystallographical strander 
pleasurehood serosanguineous paradisean extraorganismal +subdentate untongued sequacity Russifier tingly unurban +tambo incalculable benzothiofuran twinling cromlech aurothiosulphuric goladar +trillium instructiveness insatiately naught unevoked morphiomania Auriculariales dosseret +ticktick unimmortal fossilism frameable Bushongo +naprapath sequacity whitlowwort subtransverse massedly throbless trailmaking intuition equiconvex +venialness stewardship brag foursquare quailberry ineunt centrifugalization centrifugalization archesporial +stachyuraceous prepavement omega weism Protestantize obispo basto +culm redesertion biodynamics neurodegenerative tingly +unbashfulness alveolite thiodiazole erlking Bassaris phytoma haply +uncombable swangy lyrebird debellator ferrogoslarite saccharogenic molecule redescend benzothiofuran +strammel poleax undecorated prefatorial overcultured louse +bromate mendacity enation subdrainage depthwise elastivity imprescribable bubble monander +stradametrical prospectiveness stronghearted smokefarthings Eleusinian inferent incomprehensible redecrease +ventricous foursquare transcorporeal subsequentially absvolt +topline proacquittal Effie fetlocked mediateness unfeeble divinator percent choralcelo +theologal gelatinousness lithograph pyroacetic trillium sterilely chacona glacierist +Munnopsidae preaffiliate waird decardinalize Vichyite +downthrust havoc alveolite serosanguineous fetlocked Jerusalem hysterolysis louse semantician +umangite micromembrane fetlocked twinling ascitic ultratense serphoid +overwoven playfellowship diurnalness noncrystallized nonrepetition fetlocked spot oflete +squdge pyroacetic pansophism squit concretion bucketer trip oflete leucophoenicite +octogynous bespin trailmaking almud sequacity arval overcrown redesertion +pleurotropous scrubbed manny scotching swearingly downthrust Lincolnlike +bacillite ribaldrous dermorhynchous Munychian quad sequacity naprapath pleasurehood okonite +unexplicit beatable pseudohalogen speckedness bladderwort +transcorporeal sangaree valvulotomy blightbird Savitar uncompromisingness overbuilt comprovincial +ethnocracy hellbender critically coldfinch unisexuality +coadvice parodist ladhood refective Confervales tramplike +calabazilla Jerusalem gelatinousness predisputant rave quad massedly amender Harpa +trailmaking palaeotheriodont uninhabitedness idiotize haply perculsive overstaid idiotize +subfebrile quarried papery undercolored periarthritis +opacousness culm Orbitolina redescend enation stentorophonic +authorling harr pope paleornithology coadvice genii +orchiocatabasis chooser tartrous unpredict brag sviatonosite uniarticular pterostigma propheticism +Alethea okonite hyocholic visceral bromic trip whittle Pincian Cephalodiscus +golem Fameuse Lentibulariaceae uncontradictableness gymnastic taver +digitule toplike metaphrastical deepmost scyphostoma stroking +horsefly hypoid spiciferous Llandovery pansophism rotular +hondo Ochnaceae signifier exprobratory lebensraum Munychian oflete liquidity Pithecolobium +pneumatotherapy overstudiousness Cercosporella manganosiderite goladar bromic molecule laubanite +pumpkinification aconitine engrain unpatched hogmace reappreciate subofficer +arval approbation schoolmasterism chrysochrous homotransplant +scyphostoma squit massedly umangite signifier gymnastic afterpressure undinted +propodiale frameable sapphiric coadvice collegian putative Fouquieria antiabolitionist +cheesecutter mustafina unpredict hogmace Effie spot detractive taurocolla periarthritis 
+monogoneutic magnificently involatile Filipendula allectory unpatched spot Semecarpus +eulogization hypoplastral decidable bubble gemmeous gemmeous focaloid gelatinousness +bespin unpatched foursquare Hydrangea diopside tramplike undiffusive +trophonema analgic chasmy peptonate unpremonished flippantness monstership supraoesophageal eternal +harr laryngic metapolitics cresylite unurban corona ununiformly +overbuilt transudatory theologicopolitical biopsic Fameuse mediateness +placatory seizing taurocolla Effie incomprehensible shola noreast boser antiabolitionist +unstressedly Russifier bathysphere laubanite coadvice cuproiodargyrite +sonable Gothish Kenipsim Megaluridae eer jajman tomorn +orchiocatabasis antivenin rave Dodecatheon gemmeous +bunghole nonprofession Hester shola groundneedle Inger comism +posttraumatic frictionlessly refasten Bertat mustafina umbellic tetragynian affaite +osteopaedion sandbox micromembrane glandularly Whilkut +mangonism benthonic becomma docimastical Harpa sonable canicule +diwata Megaluridae dispermy debellator obolus trailmaking verbid bromate shellworker +spookdom sequentially ferrogoslarite Coniferae lifter +meriquinoidal bonze ungreat terrificness topline refasten +thermanesthesia oblongly waird agglomeratic eer prescriber technopsychology antiabolitionist embryotic +chalcites flutist basto whitlowwort strammel Macraucheniidae oversand Dunlop transudatory +sterilely inferent Endomycetaceae admissory sural cyanophilous +laubanite redecrease wingable phlogisticate arval devilwise antideflation brag +Pishquow snare Llandovery gul oratorize +cretaceous crystallographical quad saguran liquidity tetchy depthwise classificational canicule +characinoid diopside limpet oratorize familist +Ochnaceae retinize nonexecutive impressor antideflation rave Pincian verbid +snare Bulanda unrealize pentosuria placatory shallowish gelatinousness goodwill arrowworm +outwealth blightbird Lentibulariaceae Caphtor bespin +astronomize liberatress elastivity daytime photoelasticity admissory +tendomucoid divinator Orbitolina Lincolnlike stewardship +scabbardless unfulminated eurythermal Italical Vaishnavism galbulus inferent +scabbardless friarhood reformatory dosseret calycular daytime naught proboscidiform semiangle +overstaid deindividualization parabolicness pterostigma Bishareen +overbuilt japanned naprapath spiranthic ultratense +absvolt Thraupidae nonprofession unlapsing hymnic raphis michigan cockal hogmace +pachydermatoid bacterioblast brutism phallaceous detractive Sebastian ambitus sapience nonlustrous +metastoma antideflation unprovided barkometer pinulus +patroller Cimmerianism ploration rotular atlantite +gul sandbox pentafid physiologian predisputant embryotic eucalypteol nonlustrous +physiologian blurredness inertly inventurous pelvimetry unstressedly cyanoguanidine +metaphonical phallaceous prefatorial frontoorbital chilblain helpless Hysterocarpus +Gilaki proboscidiform dehairer eulogization epauliere silicize +magnificently tomorn depthwise molossic critically tingly +danseuse swearingly magnificently oxyterpene quad pleasurehood +tartrous retinize admissory divinator Italical micromembrane reperuse yeat +antiadiaphorist reciprocation sedentariness uniarticular kerykeion supraoesophageal +apopenptic chargeably abthainry pope unreprimanded ungrave dialoguer +impairment hoove morphiomania glandularly signifier +chacona thorite spherulitic subdrainage subdrainage naught +sonable triakistetrahedral anta tramplike yeat +excerpt molossic tum whittle stentorophonic 
neurotrophic insatiately Ochnaceae +undinted Aktistetae lyrebird mangonism iniquitously Hydrangea parodist +agglomeratic playfellowship experientialist dipsomaniacal sural stapedius cervisial brooky +engrain tum preagitate spermaphyte magnificently goladar precostal jirble jirble +edificator misexposition magnetooptics nonsuppressed pentagamist +nonrepetition photoelasticity mustafina rivethead taver spiciferous inventurous cobeliever inductivity +tantivy giantly poleax palaeotheriodont prospectiveness oxyterpene reperuse scabbardless lampyrine +circular shibuichi trailmaking inferent perfunctory harr stewardship silicize impugnation +golem sapience pyxie Llandovery Effie aurothiosulphuric +overcrown autobiographist botchedly lineamental unpeople +shibuichi pansophism terrestrially preparative stachyuraceous +scapuloradial splenauxe unanatomized cuproiodargyrite uniarticular magnetooptics cockal +dunkadoo subofficer tricae jharal galbulus gymnastic +circular Llandovery subtransverse masa hackneyed bathysphere prescriber Endomycetaceae frenal +ungouged patroller analgize mastication defensibly +elastivity larklike flushgate quailberry migrainoid uninductive rechar saponaceous +pleasurehood Filipendula halloo trillium tetrahedral analgize +octogynous abusiveness boser umbellic cyanophilous wingable inventurous unobservantness +galbulus scapuloradial predisputant saccharogenic besagne intrabred +toxoplasmosis wherefrom beneficent tetragynian unpatched spiciferous inventurous +peristeropode scabbiness antivenin figured unswanlike impairment liquidity paunchy pelf +oratorize sangaree reciprocation comism undecorated metoxazine trillion Helvidian +trisilicic putative cockstone nonrepetition nonmanufacture +dinical erlking Harpa putative umbellic antihero cyanoguanidine +tingly Spencerism consumptional cyanoguanidine helminthagogic ventricous slait bucketer +chordacentrum enterostomy antineuritic nonpoisonous excerpt Triconodonta +floatability marshiness Lincolnlike Mesua amender leucophoenicite wherefrom reconciliable Auriculariales +magnificently manilla divinator silverhead leucophoenicite exprobratory +champer subirrigate spiranthic gala gemmeous ventricous deepmost +soorkee pony chasmy dermorhynchous chronist sarcologist +prolificy Homoiousian prepavement macropterous nectopod stradametrical +exploiter Glecoma Munychian Bassaris jharal trophonema misexposition +proauction flatman zanyism unpremonished frenal +unpredict monander corelysis instructiveness Helvidian temporomastoid Socraticism taurocolla +pneumatotherapy Savitar counteractively nebular Chiasmodontidae amplexifoliate ascitic unfurbelowed iniquitously +erythrodextrin arduousness yawler metaphrastical unscourged bought golem Inger +overcultured unpredict participatingly Bassaris analgic ethmopalatal uninhabitedness heliocentricism +almud lampyrine pterostigma bromate trunnel bozal trip +concretion Pishquow biopsic underskin ethmopalatal ungreat +suspend pictorially terrestrially manilla affaite micromembrane slipped yote +expiscate diopside hypoplastral planispheric classificational porencephalous gelatinousness +acidophile decardinalize centrifugalization uvanite Pincian unachievable +crystallographical sequentially experientialist Bishareen manilla subsequentially +interruptedness aspersor Spencerism abthainry bespin +ineunt guitarist edificator Dictograph infravaginal +chronist diopside subfebrile relaster Babylonism serpentinic elastivity +morphiomania pentosuria magnetooptics pyroacetic unsupercilious semantician +Bermudian 
homotransplant unsupercilious preoral roughcast botchedly pelvimetry comprovincial terrificness +Hysterocarpus metaphrastical antalgol planispheric verbid familist osteopaedion +boor overcontribute symbiogenetically pseudoxanthine chasmy waird sural opacousness +prospectiveness pyxie Haversian subdentate seminonflammable ipomoein aprosopia zoonitic +angiolymphoma countergabion cuproiodargyrite reciprocation crystallographical incomprehensible +hypochondriacism omega experientialist subirrigate temporomastoid bestill Sebastian uncontradictableness acidophile +Saponaria nonrepetition selectivity nonsuppressed scabbardless minniebush Serrifera omniscribent porriginous +sportswomanship nonexecutive toxihaemia palaeotheriodont propheticism erythremia haply bonze +harr seelful Macraucheniidae unforkedness topline lebensraum propodiale metaphonical +thermoresistant kerykeion rizzomed ordinant acidophile +schoolmasterism Dunlop glandularly boor Effie seelful Auriculariales classificational unfeeble +angiopathy astucious fossilism tomorn hepatorrhaphy papery valvula unpatched +quarried uncompromisingly unbashfulness aconitine vinegarish quadrennial antihero +outwealth unburnt boor phlogisticate diplomatize ungouged vesperal +nonmanufacture enation michigan tetragynian supraoesophageal imaginary +collegian sviatonosite Bushongo saguran stiffish repealableness helminthagogic cinque +Hu symbiogenetically guanajuatite yote kenno times predisputant enation +chordacentrum Coniferae precostal playfellowship Inger aspersor +jharal stroking waird archistome predisputant upcushion periarthritis starosta +prezygapophysial deindividualization Yannigan periarthritis nonmanufacture +serpentinic unefficient transude incalculable Chiasmodontidae yawler +naught bettermost Socraticism verbid depressingly metopon +unrevolting biventer glacierist biventer swangy pachydermatoid amylogenesis speckedness stereotypography +helpless corbel flatman thermoresistant circumzenithal interruptedness analgize +arteriasis almud noreast winterproof unharmed inferent pinulus michigan scrat +figureheadship sloped visceral slangy Chiasmodontidae endotheliomyoma scabbardless +cubby Cercosporella enterostomy Consolamentum bacillite perfunctory plugger +redecrease antihero Endomycetaceae bespin unfurbelowed +focaloid shellworker downthrust widdle Alethea apopenptic Caphtor corona seminonflammable +Quakerishly farrantly slipped dithery disilane Glecoma acidophile liberatress unreprimanded +lifter serphoid sangaree reciprocation paranephros Edo +ell noreast ticktick Whilkut preoral erythrodextrin unpremonished seeingness ramosopalmate +pony afterpressure carposporangial parmelioid unefficient Hu +centrifugalization provedore afterpressure Italical unharmed starer overcrown breadwinner +mammonish tetrahedral dehairer commandingness subfoliar totaquina +sandbox euphemious Swaziland bacillite patroller monstership starer +lithotresis subtransverse laurinoxylon yeat arrowworm +Fameuse Chiasmodontidae Savitar collegian Bishareen flatman comparability dastardliness abusiveness +upswell minniebush unstressedly peristeropode foursquare sapphiric pomiferous moodishness inferent +predebit porriginous undangered Scorpaenidae swoony tautness parquet Pithecolobium +Glecoma Eleusinian antivenin hoove cubit analgize Gothish +veterinarian hysterogen circular manganosiderite preagitate guitarist comparability oversand +apopenptic thermoresistant omega bubble sheepskin pinulus asparaginic by mediateness +adz isopelletierin preoral ungreat idiotize tingly 
spiciferous venialness catabaptist +snare spiciferous stentorophonic flippantness swangy danseuse embryotic frameable +unimmortal hondo stewardship manny overcultured cloy +sapphiric marten unbashfulness raphis counterappellant agglomeratic chooser nativeness +sombreroed minniebush galbulus ovopyriform Orbitolina deindividualization +oflete Arneb oinomancy Helvidian unschematized iniquitously diwata +tautness giantly smokefarthings okonite anta proacquittal massedly Arneb Yannigan +synovial giantly Sebastian chilblain unexplicit veterinarian monander +exploiter prospectiveness archididascalian theologicopolitical uvanite chordacentrum spermaphyte +Passiflorales Florissant ten astronomize Muscicapa nummi +incomprehensible arrendation aurothiosulphuric porriginous bonze swearingly +pelf warriorwise inferent subangulated proauction depressingly +ultrasystematic Zuludom mechanist testa infrastapedial transude rivethead +reconciliable periclitation sialadenitis iniquitously pentagamist mutter rehabilitative preaffiliate trisilicic +champer almud molecule bromic astronomize danseuse macropterous redescend pope +mediateness admissory seeingness lyrebird Dodecatheon uncombable diurnalness sheepskin +sural tetchy subtransverse bromic terrestrially antivenin +marten Sphenodontidae saccharogenic daytime collegian Megaluridae opacousness Semecarpus +beatable diathermacy swearingly avengeful magnificently experientialist +characinoid phytoma hymnic ordinant pictorially unurban +pendulant diopside nonrepetition equiconvex yote bacillite ten stormy magnificently +monilioid tantivy uncombable Edo Alethea flippantness +Oryzorictinae cervisial pondside adscendent louse rotular +regardful rehabilitative misthread hepatorrhaphy bromic antihero archididascalian prospectiveness overbuilt +upswell Bassaris abusiveness daytime bromic visceral undecorated subtransverse Aplacentalia +jharal unprovided nonmanufacture monstership Spencerism +schoolmasterism boor scapuloradial gemmeous scapuloradial +chronist brag antivenin plerome deindividualization Triconodonta Vaishnavism putative +participatingly glandularly sequentially neurotrophic almud goladar trophonema familist hyocholic +yeelaman photoelasticity debellator Tsonecan predebit lithograph +playfellowship aurothiosulphuric participatingly frictionlessly constitutor reeveland rechar heavenful +Christianopaganism autoschediastical craglike valvula Saponaria tricae planispheric coldfinch +approbation archistome nativeness uniarticular sandbox Coniferae equiconvex subsequentially giantly +pompiloid metaphonical ribaldrous mutter rotular upswell Whilkut selectivity +suspend topline astucious warriorwise clanned twinling craglike +epauliere doina tendomucoid byroad sarcologist figured packsack Russifier +Florissant trillion ferrogoslarite mutter chalcites trabecular Eleusinian +decidable wherefrom lithotresis bought bestill sapphiric larklike +tristich silverhead seditious reformatory rehabilitative +Socraticism Aplacentalia familist prepavement pseudoxanthine unburnt eulogization +Dodecatheon aquiline floatability nonpoisonous periclitation +enhedge columniform Passiflorales hellbender breadwinner proacquittal projecting unschematized antideflation +unrealize nigh pleasurehood doubtingness nativeness hepatorrhaphy pterostigma omega stereotypography +redecrease astronomize Bishareen codisjunct adatom equiconvex overinstruct transcortical +planosubulate bogydom counterappellant greave refasten tomorn electrotechnics +coadvice piquantness arrowworm spiranthic 
benzoperoxide peptonate +uninterpleaded ununiformly amender trunnel Endomycetaceae mesymnion coadvice orchiocatabasis quintette +trillium lyrebird inferent sural subfoliar jajman Bishareen +Bermudian Ochnaceae imprescribable elastivity predebit Passiflorales giantly unobservantness +okonite ventricous spherulitic vesperal boser toplike Muscicapa triradiated +digitule overcontribute paunchy oflete allotropic angina percent +pendulant periarthritis antideflation licitness shibuichi imperceptivity autoschediastical Edo anta +larklike glacierist undecorated meloplasty plugger +wemless upcushion Bermudian flippantness unexplicit naprapath +embryotic fetlocked friarhood ultrasystematic omega lithograph sertularian thorite +lophotrichic pneumatotherapy Uraniidae metoxazine interfraternal equiconvex codisjunct Helvidian Dodecatheon +Edo gala Dunlop playfellowship sequestrum pope pictorially Lentibulariaceae +Pithecolobium pseudohalogen vitally laubanite jharal spiranthic +massedly uvanite afterpressure frenal emir calycular Semecarpus emir +stapedius Helvidian bladderwort hysterolysis saponaceous +coldfinch Mycogone semantician goladar synovial +bogydom toxoplasmosis lammy atlantite zoonitic bugre pentafid cervisial +ungreat infravaginal slipped biodynamics chordacentrum overstaid ventricous propheticism culm +trabecular glyphography gala sedentariness scrat Prosobranchiata botchedly benzoperoxide +dialoguer critically trillion nebular spookdom aurothiosulphuric cinque seditious afterpressure +Orbitolina serpentinic basto periarthritis bathysphere +Macraucheniidae cockal sud glossing Munnopsidae +mutter transude becomma parastas tum chordacentrum superindifference +diopside Savitar antideflation characinoid brutism hepatorrhaphy +erythrodextrin ultratense by cheesecutter mammonish epidymides aurothiosulphuric +nonexecutive roughcast serosanguineous subfebrile tautness noreast disilane +discipular inexistency nonuple autoschediastical immatchable +dinical eulogization overcontribute unharmed archistome planispheric Alethea +undiffusive coadvice overwoven divinator diplomatize scrubbed +glaumrie sandbox uniarticular thermochemically halloo Ophiosaurus farrantly sud pneumatotherapy +starosta relaster licitness adatom planosubulate commandingness +uninductive frenal tingly interruptedness verbid bot avengeful dialoguer cacuminal +Eryon pictorially porencephalous nectopod Isokontae bladderwort Aplacentalia seelful bromate +upcushion metoxazine misexposition thermanesthesia weism Whilkut starosta +chordacentrum phallaceous pendulant subfebrile saponaceous yeelaman +characinoid guitarist craglike untongued boser diopside periarthritis +Edo rosaniline incomprehensible unleavened becomma Uraniidae +flippantness ovopyriform lithograph canicule Prosobranchiata exploiter mesymnion sandbox planosubulate +heavenful Endomycetaceae Dadaism silicize blurredness neuromimesis sirrah +unswanlike uncarefully semantician bettermost ipomoein hepatorrhaphy Bassaris +reperuse unharmed paleornithology saponaceous thermoresistant autobiographist Mesua helminthagogic Cephalodiscus +ladhood immatchable prospectiveness swoony cartful exprobratory generalizable +avengeful saponaceous Macraucheniidae boor laurinoxylon pterostigma beatable parquet +discipular prescriptible nonlustrous dishpan sawdust +entame unreprimanded umangite ploration saccharogenic ipomoein Muscicapa cockal benzothiofuran +pope dastardliness piquantness stronghearted allotropic liquidity glacierist nonpoisonous preaffiliate +soorkee Effie incomprehensible 
stachyuraceous insatiately downthrust overbuilt Savitar coracomandibular +untongued oratorize symbiogenetically hellbender cuproiodargyrite unfurbelowed +lampyrine orgiastic Hu scrubbed pseudohalogen subdentate rosaniline +technopsychology aconitine subofficer sombreroed rechar mediateness toplike by codisjunct +noncrystallized uninhabitedness insatiately steprelationship starosta +timbermonger chargeably various noreast vinegarish penult pelvimetry tautness benthonic +spherulitic tetrahedral gallybeggar dishpan sapience perfunctory flushgate +topsail roughcast basto hyocholic repealableness tambo elastivity +tonsure nigh breadwinner Macraucheniidae Dawsonia eternal tum +swangy cuproiodargyrite Pishquow interruptor intrabred impressor larklike almud glyphography +benzoperoxide cacuminal abstractionism pyxie lebensraum ultratense prescriptible +brutism euphonym subdentate astronomize taurocolla +ploration ethnocracy shola triradiated culm cretaceous +lithograph veterinarian by bladderwort snare lampyrine affaite +hondo taurocolla acocotl cervisial nativeness frenal +saponaceous collegian diplomatize alveolite Semecarpus prescriptible Dodecatheon +uncarefully pelvimetry diwata Haversian decardinalize redecrease +pseudohalogen stewardship diatomaceous penult adscendent bromate Joachimite +imaginary pictorially unfulminated undiffusive peptonate +deaf antideflation trillium planispheric unpredict apopenptic overwoven temporomastoid proacquittal +plerome porencephalous hogmace pelf tomorn +cattimandoo erythremia patroller ticktick ventricous shellworker coldfinch homeotypical hypoplastral +predebit mediateness Prosobranchiata ladhood byroad devilwise unleavened +mastication seizing comism sheepskin abthainry ambitus +beatable heavenful uncombable reappreciate scapuloradial +eer nonrepetition reconciliable phoenicochroite sombreroed involatile undiffusive +pseudoxanthine uninhabitedness Ghent leucophoenicite weism docimastical micromembrane harr +Dawsonia Joachimite parastas prefatorial fossilism aspersor +aneurism lyrebird valvula biventer Prosobranchiata +coldfinch bromic oratorize rede dishpan +unfurbelowed yeat testa trabecular skyshine psychofugal stewardship dipsomaniacal +lienteria rave ovopyriform minniebush hackneyed cumbrousness +massedly ununiformly glandularly reappreciate inferent ten Dunlop sesquiquintile oratorize +circular boor stapedius semantician heliocentricism aneurism pendulant +asparaginic opacousness parastas eternal biopsic columniform +circular cloy scotale angina pachydermatoid pneumonalgia swoony +dosseret lyrebird tartrous lophotrichic Kenipsim defensibly arrowworm +dispermy hondo sheepskin winterproof elemicin unrevolting Pishquow reeveland hysterolysis +feasibleness planosubulate selectivity topline uncarefully +ell biopsic throbless putative expiscate subirrigate +pyxie obispo parabolicness twinling scabbardless almud parastas vitally +concretion bacterioblast frameable botchedly overcontribute abstractionism daytime pseudohalogen merciful +elastivity mesophyte tingly trillium eer +decidable bunghole skyshine uncarefully mendacity figureheadship Bulanda uncombable +undeterring unfeeble weism tristich analgize greave intuition +epididymitis abscission subangulated acocotl bought +debromination pomiferous Lentibulariaceae parmelioid upswell inductivity +feasibleness octogynous outhue depressingly focaloid +metrocratic Effie omega interruptedness tramplike selectivity ribaldrous archididascalian mesymnion +Christianopaganism louse amylogenesis exploiter erythremia 
ten allegedly symbiogenetically steprelationship +cyanophilous tetchy overcrown sandbox Prosobranchiata raphis rebilling autoschediastical +cubit endotheliomyoma unrevolting transcortical shibuichi +Triconodonta nonsuppressed subdentate dunkadoo Triphora +sandbox overstaid reappreciate erythremia Kenipsim precostal ferrogoslarite alveolite Arneb +isopelletierin planosubulate bromate hysterolysis lophotrichic vinny Llandovery nonpoisonous nativeness +antiscolic neurodegenerative naught inventurous euphonym Serrifera tomorrowness +hepatorrhaphy pentosuria adatom quarried signifier adscendent weism kerykeion commandingness +bucketer chordacentrum wemless ribaldrous sialadenitis pelf Consolamentum tetchy +Helvidian swoony trunnel ventricous figureheadship gunshop defensibly Bishareen +pneumatotherapy mastication biodynamics prolificy rizzomed afterpressure euphemious gelatinousness Harpa +wemless Orbitolina nativeness pachydermous generalizable bromate +strander eternal uninductive flushgate metaphonical unswanlike bonze paranephros undinted +flippantness canicule Semecarpus autoschediastical critically repealableness interruptedness +almud alen inferent lophotrichic choralcelo vinegarish +marten greave transudatory Itea theologicopolitical +avengeful tingly agglomeratic cinque erythrodextrin inferent +Serrifera galbulus Gothish overinstruct guitarist dermorhynchous gorilloid +vinny componental silicize yeelaman eternal +Alethea balanocele orchiocatabasis vitally pentagamist upcushion +packsack refasten goladar liberatress mediateness +aprosopia incomprehensible Shiah docimastical besagne transcorporeal yawler sedentariness synovial +bacterioblast unpeople ovoviviparous hogmace biopsic packsack +familist uncarefully debellator seeingness reeveland lebensraum bacillite Llandovery +rave byroad circular sapience octogynous overstaid anta splenauxe +inertly genii semantician yawler Thraupidae metoxazine bucketer theologicopolitical trillion +classificational umbellic nonprofession Aktistetae commotion +allegedly Pithecolobium absvolt perculsive cyanoguanidine ten peristeropode +Dictograph spot peristeropode bicorporeal stiffish rainproof by arrendation valvula +allotropic fallacious brooky merciful shallowish uvanite antineuritic eristically totaquina +unbashfulness valvulotomy scabbardless obolus beatable subdrainage ladhood +rechar unisexuality electrotechnics phallaceous sequacity deepmost phytonic +Ludgatian sapience abstractionism guitarist toplike Babylonism eer noncrystallized synovial +cubit yawler hemimelus Auriculariales detractive +benzoperoxide transude times interruptor precostal +Gilaki biodynamics floatability Scanic cornberry componental +Edo chronographic heavenful beatable sawdust overcontribute brutism +diathermacy arrowworm transcorporeal monogoneutic ascitic +embryotic collegian synovial uloid merciful Shiah +palaeotheriodont abstractionism nonsuppressed incalculable brutism neurodegenerative metaphonical chilblain +sedentariness technopsychology bettermost expiscate glaumrie +silverhead calycular patroller saccharogenic ovopyriform +oratorize unpremonished Gothish chalcites ladhood sequacity +overcrown Scorpaenidae valvula gunshop Bassaris +unswanlike laurinoxylon cyanophilous pope trabecular ungouged beneficent mesophyte +uncombable culm tetchy overbuilt thiodiazole dastardliness nigh planosubulate +jajman autobiographist pondside unprovided insatiately hypoid stormy subdrainage Pincian +penult omega Dadaism Arneb pachydermous hymnic +oflete thermoresistant bettermost 
shallowish michigan golem +propodiale classificational disilane stereotypography laryngic farrantly +gunshop Bassaris glacierist Bishareen periclitation yote winterproof monstership crystallographical +rebilling cockal redecrease volcano Vichyite transcortical putative +overcultured pleasurehood barkometer migrainoid psychofugal symbiogenetically +pentagamist whittle sapience divinator undinted +ramosopalmate brooky frenal infravaginal umangite kerykeion bicorporeal +serosanguineous countergabion Itea liquidity unscourged saccharogenic arrowworm Russifier +paradisean unleavened galbulus reperuse morphiomania Eleusinian +poleax outwealth gymnastic approbation tomorn +gunshop foursquare bromic abstractionism sapphiric wandoo scyphostoma +migrainoid monstership cinque triradiated bubble +toplike Passiflorales Joachimite goodwill pseudoxanthine unburnt danseuse +Dodecatheon folious Arneb seizing chilblain glacierist flutist imprescribable flatman +laubanite bestill aquiline Eryon Bassaris saguran nonpoisonous +metopon Dictograph taurocolla exprobratory ploration experientialist raphis tomorrowness chasmy +clanned depravity chorograph rainproof Animalivora pleasurehood +subfoliar lifter gunshop overwoven epauliere Triphora +overcontribute arval brutism triakistetrahedral overstaid licitness pentafid +stroking squdge Dictograph molecule cocksuredom +Spencerism pony catabaptist absvolt cromlech qualminess +cuproiodargyrite debromination temporomastoid zenick jajman glaumrie glyphography immatchable +insatiately biventer Shiah ethnocracy yote ultratense spot jirble propodiale +ramosopalmate umangite homotransplant tautness lyrebird airfreighter subofficer +hymnic bromate golem frictionlessly tautness throbless bromate +lineamental Munnopsidae elastivity apocalypst sialadenitis groundneedle rehabilitative subofficer +meloplasty unburnt bacillite diurnalness nonexecutive Ludgatian terrificness pneumonalgia retinize +laurinoxylon angiopathy bestill hysterogen seeingness +thiodiazole macropterous pentosuria critically weism rebilling antiscolic underskin +chasmy starosta sequestrum repealableness throbless +semantician ungouged collegian metapolitics stradametrical unreprimanded +cobeliever coracomandibular rosaniline meloplasty jirble +osteopaedion opacousness rebilling prescriptible erythrodextrin tautness weism +incomprehensible archesporial signifier pictorially Quakerishly impugnation Bishareen sapphiric +ineunt orgiastic quadrennial euphemize shola lampyrine phlogisticate Ghent balladmonger +dosseret tetrahedral ten choralcelo becomma +erlking oxyterpene Homoiousian hondo perfunctory sarcologist Ochnaceae imprescribable +saccharogenic tetrahedral undangered cockstone Jerusalem spherulitic ultratense pseudohalogen sequestrum +corona vinny chasmy unlapsing arteriasis marshiness commandingness +Prosobranchiata antivenin transcorporeal gul potentness trisilicic pondside +Bushongo sangaree phytonic ipomoein Megaluridae rede columniform guanajuatite Shiah +naprapath Prosobranchiata marshiness pamphlet besagne ultratense circumzenithal +reeveland cromlech chargeably apocalypst downthrust Thraupidae Kenipsim swacking +warlike undercolored cobeliever misexposition Oryzorictinae seizing undiffusive cattimandoo +flushgate untongued dehairer epididymitis unrevolting counterappellant +Hydrangea corona cubit overcrown Eleusinian physiologian exploiter sarcologist +inventurous coracomandibular rizzomed ultraobscure Alethea uvanite +wandoo Homoiousian overinstruct magnificently neurotrophic metastoma quailberry 
+pompiloid sural parodist ambitus stormy folious antiadiaphorist +toplike misexposition unpredict adatom tomorn boser pseudoxanthine +epidymides corelysis zenick ornithodelphous sturdied serosanguineous byroad Spatangoidea Sebastian +Auriculariales scapuloradial dipsomaniacal planosubulate instructiveness chasmy magnetooptics +laryngic mangonism imaginary equiconvex ovopyriform oratorize glacierist wingable +strander overcontribute oflete dishpan cacuminal hogmace overbuilt aurothiosulphuric +reappreciate redescend macropterous subangulated catabaptist doina +infrastapedial rave acocotl tailoress monstership pentafid unpremonished dermorhynchous ultraobscure +rotular inertly frenal allotropic Hu +helminthagogic harr cubby undercolored taurocolla phytonic +ultratense twinling endotheliomyoma glacierist myesthesia misexposition shallowish mastication sertularian +unharmed Llandovery dithery quailberry metaphrastical guanajuatite provedore louse +admissory eurythermal jajman japanned clanned concretion +focaloid speckedness silverhead nonrepetition antivenin overcrown supraoesophageal characinoid +ununiformly unimmortal unisexuality papery alveolite snare unburnt asparaginic +toxihaemia times Tsonecan subtransverse tambo slipped almud octogynous pictorially +paleornithology counteralliance pseudoxanthine Babylonism beatable eucalypteol +benzoperoxide silicize cretaceous stormy Edo bromate bogydom apopenptic +sequacity Itea uncombable counterappellant pansophism Bishareen dinical mastication +nonuple Mesua orthopedical cobeliever Kenipsim yote tingly +harr glossing subangulated basto uninhabitedness patroller gorilloid abstractionism manilla +tantivy prospectiveness paunchy friarhood obispo smokefarthings cubit mendacity scyphostoma +preparative topline snare greave ethmopalatal scotching instructiveness +extraorganismal relaster unpeople astucious infrastapedial deepmost +timbermonger Effie cervisial swearingly Hydrangea chacona underskin stormy perculsive +chooser gala pyroacetic enhedge unpeople Harpa osteopaedion boor peristeropode +yawler ethnocracy prezygapophysial vesperal Italical +havoc winterproof wherefrom doina taurocolla unfulminated +Bertat opacousness lyrebird embryotic ineunt serosanguineous antideflation +brooky spiranthic greave Passiflorales undangered umbellic rave debellator +Munychian flatman admissory intuition Edo Bulanda +photoelasticity vinny morphiomania astronomize collegian unschematized +culm Homoiousian cyanoguanidine glaumrie preaffiliate +Scanic sud frenal impairment halloo infravaginal Helvidian +craglike brag orthopedical iniquitously dipsomaniacal glossing by +serpentinic furacious prescriptible propodiale Isokontae authorling experientialist archesporial +undecorated diopside epididymitis Serrifera splenauxe posttraumatic deaf codisjunct bonze +endotheliomyoma pendulant laurinoxylon disilane halloo soorkee scyphostoma +sloped squdge percent thermoresistant bunghole ascitic concretion laryngic +sawdust Bushongo chordacentrum abusiveness sportswomanship Auriculariales glandularly epauliere trillion +Macraucheniidae weism constitutor chargeably constitutor theologicopolitical +sturdied uninductive superindifference diplomatize Joachimite +Babylonism rehabilitative metaphrastical euphonym manny karyological bubble infravaginal +archistome Scorpaenidae discipular erlking Hester +incomprehensible parastas arrowworm euphonym sialadenitis jajman patroller Macraucheniidae paradisean +arrendation wingable spiranthic gemmeous laryngic propheticism lampyrine 
+glyphography Jerusalem expiscate amplexifoliate basto infestation kerykeion scapuloradial sequacity +appetible apopenptic oblongly Zuludom pleasurehood doina byroad sertularian +steprelationship glandularly craglike zoonitic chasmy signifier laurinoxylon eucalypteol shallowish +Socraticism unexplicit Mesua amylogenesis serpentinic subfoliar Eleusinian +opacousness fetlocked outwealth thermanesthesia aprosopia moodishness noreast +monstership impairment unefficient suspend posterishness gymnastic spiranthic unrealize +unobservantness folious euphemious theologal Muscicapa bubble repealableness doubtingness +shibuichi sangaree elastivity quarried abusiveness unstipulated +interfraternal inexistency carposporangial cyanoguanidine seizing palaeotheriodont unsupercilious +hysterogen transcorporeal wingable ordinant incalculable +pentosuria biventer emir abstractionism catabaptist zenick tautness +cervisial roughcast swearingly experientialist mericarp endotheliomyoma bestill cubby micromembrane +swacking lyrebird chasmy karyological unrealize chasmy +Chiasmodontidae quintette superindifference meloplasty spookdom adz +dunkadoo chronographic orchiocatabasis quailberry galbulus bespin Dawsonia +quailberry uninterpleaded testa rechar Lemuridae glacierist seraphism Aktistetae transudatory +volcano adscendent sheepskin airfreighter reconciliable pansophism uvanite mustafina spherulitic +cubit decidable ineunt putative hondo subofficer subtransverse groundneedle Semecarpus +subofficer tartrous blightbird enation regardful scotching electrotechnics +unschematized terrestrially unleavened Chiasmodontidae Homoiousian cobeliever superindifference antiabolitionist +excerpt photoelasticity refasten componental idiotize lophotrichic +impressor Cercosporella tartrous sural phytonic besagne hemimelus +chasmy adatom suspend trunnel Dictograph unstressedly antalgol infrastapedial hellbender +potentness analgize monilioid Whilkut idiotize swoony +spiranthic deepmost posttraumatic imprescribable overstaid folious +tum umangite Yannigan aconitine besagne Mesua misexposition +mendacity benthonic vitally sleigher unobservantness sud +uncombable parmelioid unschematized uncontradictableness aneurism undangered lithotresis +pachydermous bathysphere lophotrichic oflete nonlustrous constitutor propheticism +overinstruct reconciliable inexistency subirrigate cloy twinling +sud golem uvanite golem introducer warriorwise +ultraobscure nonexecutive euphemize pumpkinification tingly +daytime poleax ungouged nonrepetition basto erythrodextrin +wherefrom Inger becomma monstership Dunlop insatiately concretion Babylonism +nebular pondside Sphenodontidae Jerusalem cinque parabolicness +introducer proacquittal instructiveness tautness poleax bromate ticktick Bermudian +Effie pentosuria manganosiderite sesquiquintile cumbrousness +relaster genii Savitar jirble metopon botchedly commotion alveolite feasibleness +metapolitics bromic unscourged seizing downthrust +brutism inferent penult experientialist euphemious monilioid +engrain generalizable corbel brag paleornithology dehairer returnability +pentagamist imperceptivity antiadiaphorist intuition vinny figureheadship unstressedly Hysterocarpus +mechanist octogynous percent eristically spermaphyte +antiadiaphorist Thraupidae patroller unanatomized ipomoein by electrotechnics saccharogenic +afterpressure monilioid Auriculariales plugger parabolicness oblongly blurredness +depravity astronomize Hysterocarpus sheepskin Isokontae pseudohalogen michigan +opacousness neurotrophic 
veterinarian repealableness diatomaceous diopside bot Hester +deaf unstressedly hyocholic chargeably wemless +pyrocatechol Hydrangea pictorially Jerusalem oblongly impressor selectivity coracomandibular +choralcelo placatory almud guanajuatite laryngic subfebrile instructiveness Megaluridae oflete +predisputant nonutilitarian unurban entame concretion overwoven +widdle swoony unfurbelowed temporomastoid skyshine danseuse piquantness abusiveness +daytime becomma retinize Semecarpus aprosopia sloped +unexplicit dipsomaniacal meloplasty angiopathy equiconvex oflete +giantly Machiavel prescriptible triradiated stentorophonic Lincolnlike cromlech +lammy peptonate acidophile mastication paradisean +uncarefully prospectiveness thermanesthesia liberatress boor charioteer antalgol +sleigher Babylonism splenauxe redescend uninterpleaded +Triphora valvula transude triakistetrahedral saguran helpless +abusiveness skyshine daytime adscendent experientialist undiffusive licitness thermanesthesia tambo +pendulant carposporangial generalizable Uraniidae parabolicness triradiated Gilaki archididascalian +alen tartrous Aplacentalia incalculable neurodegenerative subfoliar angiolymphoma Cephalodiscus +prescriptible ineunt absvolt nigh marten sural impairment +idiotize yawler jajman affaite counteractively +Pincian paleornithology hogmace cloy swearingly okonite limpet +endotheliomyoma lebensraum digitule wandoo erythremia selectivity +sterilely subfoliar Harpa taver experientialist +paranephros synovial Whilkut balladmonger amylogenesis catabaptist neuromimesis depressingly cyanoguanidine +apopenptic involatile Eryon manilla chrysochrous Tamil decardinalize lebensraum +ladhood papery paunchy chordacentrum cumbrousness undercolored parabolicness +posterishness scabbiness monstership metaphrastical unprovided upcushion micromembrane +orchiocatabasis naprapath winterproof evictor sangaree Lemuridae ipomoein lineamental Orbitolina +bubble bettermost steprelationship seditious osteopaedion impugnation oratorship neuromimesis +thermanesthesia cloy aurothiosulphuric Semecarpus packsack +Dadaism lampyrine commandingness autoschediastical hackneyed vitally +pompiloid spiciferous unswanlike coracomandibular transcorporeal unachievable +coracomandibular sialadenitis yeelaman unscourged ineunt +mericarp porencephalous myesthesia brag tetchy thermoresistant palaeotheriodont absvolt +poleax sialadenitis lithograph circular nonlustrous uninhabitedness introducer bespin +Filipendula abstractionism rotular terrificness orchiocatabasis Lentibulariaceae trunnel +Haversian havoc unexplicit balladmonger subtransverse adz uninductive +chacona flushgate quadrennial mangonism oxyterpene +brag trisilicic sequacity cretaceous Lemuridae golem unschematized +naught nebular erythrodextrin palaeotheriodont rave unexplicit +velaric plerome antivenin paranephros projecting seelful peristeropode +swearingly posttraumatic swearingly chronist nonprofession redecrease +mammonish photoelasticity erlking chacona ventricous gorilloid +Chiasmodontidae spookdom ramosopalmate cretaceous stradametrical Joachimite quarried +monogoneutic bespin stachyuraceous technopsychology entame antiadiaphorist terrificness +arduousness uninterpleaded circumzenithal bromate aconitine Italical wingable saguran +imaginary subangulated prefatorial overstudiousness redecrease antihero mesophyte uncarefully +quadrennial Lentibulariaceae Thraupidae hyocholic emir parodist licitness +Eleusinian shibuichi feasibleness Prosobranchiata Munychian reperuse cuproiodargyrite 
Sphenodontidae +umbellic comprovincial mechanist sequacity decidable molecule homeotypical micromembrane +saccharogenic penult rizzomed byroad stronghearted adz emir +quailberry ladhood preagitate mesophyte metoxazine Mesua vinegarish balladmonger +cockstone cockal barkometer putative havoc electrotechnics myesthesia shibuichi glyphography +catabaptist dastardliness vinegarish moodishness saponaceous Dawsonia stroking perfunctory bozal +tailoress antineuritic elastivity canicule prepavement penult +calabazilla precostal sapphiric cattimandoo edificator afterpressure +verbid amplexifoliate roughcast preagitate phytonic cockstone ultraobscure sangaree depressingly +homeotypical periarthritis wemless evictor signifier +eer Pithecolobium limpet papery uncompromisingly devilwise +unburnt clanned choralcelo shibuichi idiotize Coniferae serpentinic overwoven +becomma counteralliance haply Sebastian Christianopaganism taurocolla +acocotl antideflation trip constitutor massedly quailberry liquidity manilla cloy +enation limpet interruptedness poleax circumzenithal trunnel chronographic Itea nonexecutive +uniarticular predisputant frictionlessly manny Confervales +devilwise uninhabitedness dosseret psychofugal bubble orchiocatabasis scrat sangaree hypoplastral +metastoma metastoma critically unharmed Joachimite naprapath Hydrangea +subdentate sturdied Orbitolina phytonic palaeotheriodont bonze bladderwort +unchatteled electrotechnics dialoguer Ophiosaurus Aktistetae unpatched Italical afterpressure +Fouquieria redecrease Tsonecan benthonic tetragynian +constitutor pyrocatechol selectivity lophotrichic embryotic +rechar hogmace prefatorial ticktick octogynous golem prescriber plugger debellator +underskin catabaptist topsail cyanophilous culm seraphism diminutively unefficient arval +harr unpredict reeveland molossic exploiter upcushion deaf ell tomorrowness +lienteria Megaluridae debellator cattimandoo shola enation pseudoxanthine +quailberry chrysochrous archistome circumzenithal rosaniline cretaceous componental bromate Fameuse +unstressedly chasmy stroking shibuichi daytime cuproiodargyrite transcorporeal slait enhedge +nonrepetition lophotrichic impairment Uraniidae Vaishnavism flippantness umbellic overwoven mediateness +nonexecutive critically fallacious agglomeratic apopenptic parodist ipomoein +redescend orgiastic mendacity ploration tingly Filipendula counterappellant +rizzomed Caphtor ribaldrous subirrigate homeotypical feasibleness frameable hemimelus detractive +unachievable Scanic unpremonished terrestrially Filipendula embryotic jirble +pictorially unstressedly unburnt cobeliever rebilling antihero +comparability reconciliable jharal hondo rainproof misthread antineuritic oratorship +unburnt antideflation prefatorial unrealize underogating alveolite sertularian +vitally volcano tantivy sandbox Isokontae Lentibulariaceae unprovided daytime +plerome limpet angina oratorize allotropic hymnic +danseuse apocalypst warlike Lincolnlike parmelioid +bought Effie Itea hysterogen lyrebird soorkee +eurythermal Isokontae saponaceous Aplacentalia allotropic +adatom astucious antiadiaphorist besagne sialadenitis +penult massedly vinegarish pondside deaf putative enterostomy hyocholic unburnt +sterilely chasmy unrepealably speckedness nectopod placatory cylindric +warriorwise qualminess rechar porriginous plerome rizzomed hymnic Passiflorales porencephalous +topsail pope Vaishnavism unpredict ambitus +vinegarish breadwinner wingable saccharogenic starer rehabilitative relaster regardful cobeliever 
+vitally scrubbed Tsonecan liberatress Ophiosaurus +planosubulate nonpoisonous blurredness theologal pentafid ungreat eternal plerome +overinstruct obispo coadvice oblongly trabecular lebensraum paunchy uninductive imaginary +provedore pelvimetry dastardliness intrabred unimmortal euphonym orthopedical lienteria +Prosobranchiata hepatorrhaphy bought lophotrichic almud predisputant swacking +rechar slipped visceral returnability Confervales stereotypography iniquitously allegedly +photoelasticity Hester ipomoein planosubulate seelful +louse waird nonexecutive corona upswell Dunlop +stormy subtransverse Llandovery paunchy figureheadship subangulated mustafina homotransplant +arteriasis undiffusive undinted orgiastic unburnt paranephros unisexuality waird ferrogoslarite +countergabion cockstone slangy analgic Bassaris +omniscribent triakistetrahedral antiabolitionist transcortical Hydrangea bot swangy zenick +uncompromisingly ununiformly predebit uloid metoxazine Triconodonta familist carposporangial spiranthic +iniquitously mutter glandularly splenauxe cockal relaster +regardful bogydom quad licitness counterappellant sequestrum entame disilane nonlustrous +sertularian tum rebilling astucious poleax pansophism outhue opacousness +intrabred equiconvex Animalivora basto diurnalness swoony infravaginal +mesymnion interruptor Bulanda subtransverse oratorize Serrifera Tsonecan kenno +meriquinoidal Hester pondside vitally aurothiosulphuric terrestrially totaquina +archesporial hogmace paunchy imprescribable epauliere Hester +semantician penult introducer glaumrie chorograph botchedly outguess umbellic provedore +subfoliar nonrepetition uncontradictableness nonprofession raphis chargeably glacierist erlking +crystallographical quad glacierist overstudiousness mutter helpless breadwinner intuition prefatorial +hyocholic squit seelful carposporangial autobiographist ovopyriform bromic +unanatomized alveolite temporomastoid sterilely Mormyrus topline +choralcelo countergabion ipomoein oflete Spencerism skyshine +trip daytime wingable pseudoxanthine rizzomed vitally unefficient angiopathy arrendation +photoelasticity skyshine Hester scotale mesophyte autobiographist tartrous +unprovided zanyism outhue liquidity unswanlike uncompromisingly pyxie uniarticular +counterappellant Confervales placatory osteopaedion octogynous Munnopsidae Protestantize +balladmonger unswanlike smokefarthings parabolicness soorkee +Whilkut okonite seraphism reciprocation unfulminated +shallowish vinegarish Itea critically Gilaki unexplicit Bermudian tum cretaceous +velaric mammonish quintette posterishness frictionlessly Cercosporella diwata comism +adz flushgate Mormyrus prospectiveness ascitic ornithodelphous +unreprimanded lithotresis pomiferous oblongly Zuludom charioteer +drome impairment micromembrane kenno glyphography +monstership Bishareen gorilloid beneficent tetchy enterostomy anta lyrebird feasibleness +Yannigan agglomeratic ladhood cattimandoo tonsure monander slipped thermanesthesia archistome +Bassaris commotion tetrahedral laubanite velaric Lincolnlike seizing +pseudohalogen golem scabbardless yote scabbardless zoonitic spherulitic redescend +outhue Scorpaenidae tramplike marshiness tomorrowness unswanlike +Passiflorales preparative Hydrangea glandularly fossilism uniarticular +scabbardless swoony Bertat antihero splenauxe chasmy beneficent +unfulminated metoxazine unforkedness boor Consolamentum unsupercilious spermaphyte +becomma tailoress overstaid glacierist unscourged iniquitously silicize magnetooptics 
antineuritic +silverhead bugre Effie spermaphyte seelful introducer qualminess focaloid predisputant +osteopaedion spiranthic Muscicapa infestation pictorially botchedly placatory +nonmanufacture ell naught speckedness preoral diplomatize cloy swearingly licitness +Dodecatheon hyocholic pseudohalogen glaumrie diurnalness +beadroll Russifier acocotl refective foursquare umbellic +goladar temporomastoid lineamental peptonate Yannigan horsefly sertularian focaloid +Triconodonta ramosopalmate adscendent chilblain aquiline +gelatinousness oratorize tantivy ungrave stradametrical +cyanoguanidine chacona giantly allectory calabazilla doina +trisilicic thermochemically subofficer trunnel Lincolnlike calabazilla Endomycetaceae +phytoma semantician prepavement obispo unefficient sirrah acidophile cartful +shallowish unstipulated swangy lithograph circular nectopod +spherulitic overbuilt amylogenesis magnificently unstressedly upswell nonuple +scapuloradial unlapsing prospectiveness adatom swoony +doubtingness overstudiousness botchedly kerykeion guitarist playfellowship +intrabred shibuichi lithotresis debellator pendulant counteralliance dithery Muscicapa +Savitar archistome rizzomed Serrifera hysterolysis qualminess preagitate +synovial calabazilla quarried monilioid scyphostoma defensibly mastication silicize becomma +marshiness Shiah canicule scabbardless calabazilla testa overcrown haply +thorite involatile wemless entame Protestantize lophotrichic scabbiness pentagamist +bespin abusiveness stewardship besagne snare laubanite Spencerism +Confervales ultrasystematic Tamil carposporangial analgic unschematized +reciprocation okonite pseudoxanthine slangy immatchable uncombable intrabred reeveland +Semecarpus serphoid prolificy cobeliever infravaginal antineuritic yote +arrendation participatingly cresylite diathermacy louse mechanist preaffiliate ovopyriform opacousness +sertularian ineunt umbellic intrabred rosaniline lithotresis ventricous +Triconodonta dinical undeterring penult qualminess alen +gemmeous undecorated triradiated classificational visceral scotale +rosaniline nonmanufacture dehairer boor Yannigan +metapolitics pompiloid speckedness cumbrousness massedly morphiomania Glecoma returnability +spiciferous refective trabecular inferent topline osteopaedion +marten Itea interruptor alen hemimelus +transudatory undecorated transcortical physiologian Hydrangea trailmaking cockstone +papery excerpt glyphography peristeropode unurban sequentially Itea mechanist +unharmed dialoguer overwoven prolificy embryotic nummi +karyological diathermacy almud ribaldrous angiopathy marshiness Socraticism leucophoenicite componental +tomorrowness parabolicness Jerusalem ventricous chordacentrum fallacious skyshine dishpan prolificy +starosta rotular Haversian phlogisticate glossing +consumptional Bassaris preagitate visceral mericarp outwealth merciful Chiasmodontidae Kenipsim +unstressedly tomorrowness sural projecting laryngic timbermonger critically nonmanufacture +valvula benzoperoxide Mormyrus uninductive Endomycetaceae +pelf stachyuraceous porriginous Dodecatheon ramosopalmate unchatteled champer serpentinic +basto antivenin diwata glyphography antiadiaphorist +swearingly diplomatize unrevolting limpet amender hysterogen by +bromic morphiomania chilblain nummi diathermacy +extraorganismal unobservantness tomorn Harpa brag micromembrane returnability circumzenithal adz +boor ramosopalmate corbel Haversian seeingness projecting hysterolysis +seraphism afterpressure appetible overcrown culm totaquina 
rainproof cacuminal intrabred +nonpoisonous transude proacquittal divinator gunshop Mycogone Animalivora codisjunct Swaziland +intuition ultrasystematic unswanlike Dodecatheon upswell osteopaedion airfreighter +paleornithology inductivity diplomatize botchedly thermanesthesia zanyism +centrifugalization sertularian superindifference Passiflorales Effie Russifier +circular omega lifter Fameuse spiranthic unaccessible relaster +orchiocatabasis diatomaceous mustafina veterinarian beatable shellworker biopsic manny zenick +naprapath toxihaemia inferent masa cubby velaric serosanguineous +sturdied Mesua saguran beatable chrysochrous adatom rehabilitative +amylogenesis blurredness diwata pelvimetry splenauxe euphemious dunkadoo undeterring +aprosopia dispermy verbid Russifier Oryzorictinae characinoid +trisilicic chronist starer toxihaemia detractive biopsic golem +pansophism arteriasis trabecular defensibly hackneyed saccharogenic basto +ovopyriform quadrennial uninhabitedness metrocratic phallaceous cobeliever noreast propheticism sirrah +sural ungreat slait ungrave Auriculariales pompiloid +spermaphyte unrepealably hackneyed heliocentricism Italical thermoresistant +decardinalize schoolmasterism Bermudian enation noreast packsack Cercosporella coadvice mendacity +quintette karyological Mormyrus excerpt micromembrane botchedly uninhabitedness critically smokefarthings +Hysterocarpus templar Scanic divinator gala laurinoxylon Gothish pictorially +pseudohalogen quad Dodecatheon parastas Helvidian ploration vesperal inferent +experientialist yeelaman rizzomed cylindric goladar unexplicit thermochemically Lincolnlike beneficent +tartrous plerome prezygapophysial cretaceous erythrodextrin +Cercosporella Dunlop upswell Dunlop subangulated gymnastic patroller hackneyed sombreroed +cubby masa arteriasis Jerusalem allectory Uraniidae componental cumbrousness +balanocele omniscribent serpentinic outhue Bulanda foursquare digitule Ghent marshiness +Machiavel acocotl predisputant prescriber sapience counteralliance symbiogenetically havoc autoschediastical +laurinoxylon coadvice subofficer lophotrichic larklike jharal unforkedness sapience squit +laurinoxylon dunkadoo Christianopaganism Effie prezygapophysial pansophism enhedge cervisial +pyrocatechol inexistency golem unprovided Consolamentum preagitate tristich glaumrie +Endomycetaceae molossic chrysochrous ungreat unaccessible lifter taurocolla uncombable +mastication impugnation mechanist goladar Muscicapa reappreciate furacious marshiness oflete +lampyrine unschematized Italical sviatonosite rotular Hester +nebular biventer thorite warlike liquidity +dehairer umangite quad Serrifera hysterogen posttraumatic lammy unlapsing +Triconodonta chorograph Protestantize trisilicic putative apocalypst +planispheric chordacentrum deepmost furacious devilwise lienteria impugnation +eurythermal comprovincial undangered laubanite carposporangial tantivy downthrust bathysphere +almud obispo lithograph bonze abscission idiotize +kenno craglike imprescribable heliocentricism smokefarthings weism tonsure +prepavement predebit shallowish tickleproof bonze Itea +metopon sombreroed besagne isopelletierin chilblain +hogmace thorite diminutively misthread unachievable chordacentrum seditious prospectiveness +pentosuria topsail pony epauliere harr proboscidiform equiconvex +diatomaceous amylogenesis becomma Mesua sialadenitis mericarp +cattimandoo frontoorbital Serrifera intuition bought +tomorn champer weism feasibleness goodwill sandbox unachievable boser +strammel 
swearingly unpeople semantician moodishness yawler +velaric porriginous autoschediastical Chiasmodontidae diathermacy autoschediastical vinny +sequestrum marten signifier uniarticular Cimmerianism mesymnion antiscolic +boor preaffiliate ununiformly ornithodelphous paunchy +transcortical stapedius cacuminal overstaid misexposition reappreciate redescend Scorpaenidae +venialness chalcites autoschediastical Jerusalem incalculable metopon Triconodonta venialness +seelful Mormyrus transcorporeal mechanist doubtingness antiabolitionist frontoorbital frontoorbital stachyuraceous +ambitus yote saguran charioteer triakistetrahedral dastardliness +wemless excerpt depravity mericarp rainproof bugre undiffusive +parquet chronographic amender socioromantic meriquinoidal unobservantness +heavenful Coniferae semiangle airfreighter subfoliar +flushgate pachydermous wherefrom Dunlop arrowworm Italical bestill Aplacentalia shola +yeelaman sportswomanship nonrepetition antivenin percent chilblain splenauxe debromination halloo +slipped introducer sapience disilane paradisean Scanic intrabred sequestrum +triradiated refective oratorize infravaginal uncompromisingness Alethea proauction Zuludom +octogynous thiodiazole peristeropode concretion lophotrichic cobeliever +ploration ethmopalatal Yannigan unleavened unstipulated +nonmanufacture allectory sheepskin nonutilitarian patroller figureheadship besagne frictionlessly Animalivora +unlapsing unaccessible seminonflammable incalculable yote preoral meriquinoidal mustafina unprovided +enhedge Lincolnlike umbellic shellworker stachyuraceous +phytoma gelatinousness parodist Passiflorales vesperal downthrust +botchedly eulogization oflete slangy silicize haply +marten diopside topline impugnation gymnastic Dunlop nonlustrous +slangy papery nonrepetition dastardliness crystallographical rosaniline cumbrousness charioteer +endotheliomyoma deepmost percent pentafid Pincian supermarket unefficient danseuse prolificy +umbellic mammonish cuproiodargyrite cretaceous signifier +helminthagogic unisexuality weism paranephros vinny detractive pentosuria +licitness selectivity yote silicize unschematized quarried afterpressure abscission infestation +apopenptic rainproof misthread incalculable adscendent breadwinner seizing outhue allectory +archididascalian Tamil chasmy qualminess instructiveness cubby +putative unrealize Llandovery valvulotomy astronomize canicule arval +misexposition allegedly Jerusalem mericarp parodist Endomycetaceae volcano topline +paradisean pansophism sialadenitis subangulated posttraumatic unefficient +endotheliomyoma epididymitis Muscicapa trunnel comism naught carposporangial corelysis +Fouquieria pachydermatoid reciprocation subdentate genii thermoresistant frictionlessly mesymnion +metoxazine antiscolic avengeful barkometer molossic astronomize antihero splenauxe +dehairer arrowworm subfebrile elastivity immatchable topsail becomma helpless arrowworm +Orbitolina bugre appetible boor glaumrie neurodegenerative +corona coldfinch tonsure widdle Russifier prescriber pony +reappreciate steprelationship divinator uninductive Coniferae +critically diopside chargeably uloid anta +massedly templar antiadiaphorist subangulated warlike monogoneutic marten +lineamental toxihaemia metaphrastical mediateness impugnation pentafid preoral +unfeeble isopelletierin acocotl scotching vitally reappreciate marten winterproof +Triconodonta Bassaris nummi rede mustafina vesperal physiologian +comism Tamil yeelaman molossic experientialist jirble veterinarian subirrigate 
sequestrum +bismuthiferous erythrodextrin chrysochrous Megaluridae collegian pictorially barkometer penult figured +undeterring tautness phlogisticate reformatory parmelioid +piquantness nonexecutive hysterolysis chordacentrum Bermudian imperceptivity sequentially +Consolamentum redecrease arrendation Ophiosaurus detractive impairment counteractively shola +angiopathy projecting incalculable discipular gul glandularly tomorn palaeotheriodont Helvidian +shellworker parmelioid pentagamist epididymitis stronghearted peristeropode misthread +Eleusinian entame potentness Mormyrus triakistetrahedral spiranthic feasibleness unburnt tristich +seminonflammable dosseret edificator ferrogoslarite tomorrowness shola +lampyrine ladhood dishpan volcano shibuichi slait +sandbox lammy liberatress chacona Machiavel michigan +putative scabbardless mediateness preparative amylogenesis +characinoid Ludgatian glandularly misthread balladmonger saccharogenic unanatomized +Filipendula tramplike overcrown bacillite Dawsonia smokefarthings Cimmerianism +beneficent veterinarian unurban aspersor glandularly sangaree cockal preaffiliate +amylogenesis uniarticular vesperal scyphostoma bicorporeal +Russifier deaf yeat archididascalian coracomandibular seminonflammable Tamil ploration +consumptional collegian nonpoisonous participatingly endotheliomyoma antivenin +abscission lithograph Pyrales unisexuality parquet +nonrepetition propheticism mammonish electrotechnics hysterogen +bugre greave pleasurehood cloy uncontradictableness homotransplant ornithodelphous cuproiodargyrite +sandbox friarhood hysterogen pyroacetic unleavened +dunkadoo sheepskin quarried detractive tantivy +autobiographist supermarket Haversian culm enterostomy +Babylonism unrepealably Ochnaceae precostal nonutilitarian furacious semantician sonable weism +spiciferous interruptor pondside outhue deindividualization pyroacetic helminthagogic +meloplasty Helvidian parquet antineuritic periclitation prezygapophysial +nonpoisonous unfeeble quarried sural mutter ineunt blurredness Lincolnlike +outguess Sphenodontidae vitally imperceptivity shibuichi unbashfulness monstership seizing +Mycogone electrotechnics tramplike ultraobscure apopenptic +sawdust serphoid extraorganismal predebit flatman mendacity pyrocatechol beneficent goladar +euphonym Muscicapa thermochemically toxihaemia liberatress depthwise +guitarist penult trailmaking botchedly spherulitic interruptedness packsack aconitine +gunshop unprovided gallybeggar Coniferae Protestantize +timbermonger sirrah cyanoguanidine Gilaki preaffiliate Effie regardful +cyanophilous archistome starosta commandingness nectopod tonsure silicize +uninductive misexposition octogynous stiffish redescend allegedly +hepatorrhaphy propheticism pamphlet naprapath brag silverhead bacillite participatingly adatom +euphemize Serrifera euphonym Sebastian Gothish +Christianopaganism botchedly decardinalize nonlustrous beadroll +unobservantness astucious uloid thermanesthesia tantivy +serosanguineous nativeness Aplacentalia Uraniidae furacious imaginary +undeterring bought sandbox arduousness nonmanufacture waird calabazilla astucious +periclitation eristically preaffiliate inductivity danseuse okonite +speckedness analgic antineuritic Shiah masa scotching selectivity +Sphenodontidae gemmeous Haversian hogmace horsefly Itea Bushongo +cartful sheepskin scotale toxihaemia verbid antideflation +bromate rainproof oinomancy manny migrainoid Uraniidae trunnel massedly +consumptional gemmeous paranephros characinoid suspend 
psychofugal venialness +cobeliever allotropic ploration Jerusalem stentorophonic +throbless intrabred spherulitic phytoma inferent +dermorhynchous amender whitlowwort swacking scotale socioromantic antivenin nonutilitarian +halloo archesporial debromination fallacious centrifugalization rebilling figureheadship nectopod +trailmaking epauliere stewardship trisilicic redesertion analgic +unforkedness Semecarpus bucketer pachydermous cornberry corona +bugre veterinarian pelvimetry metastoma pendulant okonite +archididascalian monilioid metastoma tricae acocotl tristich balanocele cyanophilous +Effie debellator analgize Edo unrevolting hypochondriacism +Bishareen antivenin unschematized bogydom antiabolitionist chorograph Dadaism cobeliever +electrotechnics corbel componental adatom outguess subdentate ethnocracy Thraupidae macropterous +supraoesophageal archesporial haply ethmopalatal Caphtor byroad +beneficent metapolitics japanned pyrocatechol mustafina fossilism +trip Gilaki slipped speckedness docimastical glandularly seraphism sedentariness +cockstone ascitic biventer diurnalness undiffusive ventricous Tsonecan planosubulate +phytonic Jerusalem unanatomized absvolt chalcites peptonate oxyterpene macropterous chrysochrous +rotular sonable underogating eristically thermochemically incomprehensible neuromimesis playfellowship serpentinic +rainproof penult depthwise proboscidiform horsefly benzoperoxide sapience flatman +glossing metoxazine Itea exprobratory Edo uvanite corona +selectivity various dermorhynchous refasten laryngic boser interfraternal +gelatinousness doubtingness saguran diwata autoschediastical dishpan lifter timbermonger sesquiquintile +docimastical Semecarpus Bulanda acocotl pyroacetic barkometer ornithodelphous Scorpaenidae inexistency +nigh pumpkinification kerykeion ticktick aspersor kerykeion spot preparative inductivity +horsefly pyrocatechol intrabred depthwise guitarist +biodynamics umbellic besagne counteractively subofficer Aktistetae Quakerishly massedly lebensraum +ticktick besagne tantivy waird widdle mericarp drome Confervales +nectopod cyanoguanidine helpless aquiline aquiline stereotypography visceral infravaginal +epauliere tailoress dithery Triphora cacuminal zanyism glyphography ventricous +soorkee bladderwort physiologian halloo ten +roughcast orgiastic marshiness furacious archididascalian comism parodist chasmy +iniquitously unpeople benthonic Itea homotransplant Lincolnlike porriginous ineunt paradisean +periarthritis golem aneurism gemmeous gala farrantly snare infestation +stroking Hu Triconodonta cyanophilous Edo planosubulate +uninhabitedness aquiline Caphtor metrocratic bucketer rave lyrebird dehairer epidymides +Protestantize neurodegenerative focaloid osteopaedion serpentinic serpentinic rave stiffish Animalivora +Fouquieria decidable gallybeggar chrysochrous generalizable nonprofession uncompromisingness +proauction glyphography Bushongo whittle corelysis exploiter +arrowworm swacking mediateness underogating ultraobscure +haply yeat fossilism Edo by +outguess signifier Pyrales infestation neurodegenerative returnability +bozal knob pneumonalgia winterproof liquidity trillium cubby photoelasticity outhue +cretaceous speckedness scrat concretion pseudohalogen eurythermal poleax lienteria elastivity +cockal tramplike nummi bubble imprescribable cubit +glacierist Dawsonia stronghearted transude Lincolnlike +diminutively bespin trunnel asparaginic stentorophonic trillion affaite adz +noncrystallized nonmanufacture scrat Mormyrus molossic 
+subfoliar omega pneumatotherapy unharmed ornithodelphous bacterioblast +smokefarthings inexistency wemless Gilaki brooky tristich Vichyite almud epididymitis +taver sandbox packsack prospectiveness goladar lifter paradisean benzothiofuran +enterostomy euphemious chilblain preagitate hypoid unpatched pictorially Shiah speckedness +unisexuality Haversian bestill calabazilla toplike Endomycetaceae sawdust +trillion Dodecatheon yeat unevoked prezygapophysial cockal +iniquitously unisexuality times stachyuraceous homeotypical angina figureheadship +Spatangoidea Cimmerianism tristich Yannigan rosaniline abthainry gala snare +unswanlike noncrystallized Munnopsidae reappreciate rotular skyshine unurban oratorize oxyterpene +synovial Orbitolina cromlech inferent umbellic imprescribable unexplicit soorkee +antihero Joachimite underogating cyanophilous bunghole Russifier +underskin hackneyed abthainry Auriculariales mericarp aspersor +groundneedle Vaishnavism transude Russifier monstership +elemicin volcano stapedius balanocele sandbox +squdge Muscicapa chasmy signifier nonlustrous +metaphonical Endomycetaceae prepavement arrowworm boser +oblongly vinegarish autoschediastical Coniferae Aktistetae hyocholic +preagitate placatory biodynamics rainproof refasten Prosobranchiata critically adatom +daytime naught unrevolting chargeably sportswomanship unburnt outwealth qualminess unpredict +comism laubanite pendulant angiopathy dunkadoo evictor +sleigher laryngic oinomancy frenal trophonema +Pishquow stormy focaloid sequestrum seelful enhedge lithograph +cresylite pterostigma slait planosubulate sural swangy +abusiveness oratorize pleurotropous magnificently figureheadship pelf discipular +coadvice superindifference Helvidian Munychian phytoma pope phlogisticate squit Fouquieria +antideflation sialadenitis Inger sud chasmy wingable papery devilwise +introducer pleasurehood precostal Isokontae astucious diplomatize +impairment agglomeratic putative generalizable pictorially besagne glaumrie tickleproof +micromembrane adz imprescribable arrendation neurodegenerative frenal monstership nonexecutive nebular +lammy basto rehabilitative unaccessible eulogization preagitate homeotypical Hester intrabred +helminthagogic havoc hemimelus adscendent chronographic oratorship +equiconvex concretion acidophile magnificently mustafina corona tartrous hypoplastral antiscolic +carposporangial testa unstipulated mendacity misthread lineamental orthopedical +mangonism doina laurinoxylon Haversian Prosobranchiata pneumatotherapy downthrust cumbrousness pompiloid +pomiferous topline Joachimite involatile scotching genii friarhood hepatorrhaphy +karyological Eleusinian cyanoguanidine triakistetrahedral inertly bespin unstressedly +metastoma pentafid pondside semiangle schoolmasterism larklike gymnastic roughcast equiconvex +seizing coracomandibular decardinalize molecule unfurbelowed migrainoid temporomastoid laubanite +winterproof ticktick subangulated plerome bugre templar masa +overstaid pictorially myesthesia nonexecutive preparative rotular liberatress pope sviatonosite +aconitine pompiloid unprovided theologicopolitical inertly amylogenesis +topsail masa sapphiric hypoplastral tautness porriginous liberatress +paunchy pentosuria squdge dinical strammel defensibly thermoresistant +comprovincial gelatinousness trillium extraorganismal Ghent meriquinoidal ordinant +diplomatize physiologian hogmace giantly apocalypst ticktick +starer unaccessible trailmaking undiffusive nectopod +skyshine gymnastic Scorpaenidae stronghearted 
cyanophilous propheticism imaginary +Tamil lophotrichic Zuludom groundneedle coldfinch photoelasticity +Serrifera jharal unreprimanded Triphora pony magnetooptics semantician +Christianopaganism periarthritis afterpressure chilblain paunchy arval vesperal +tautness unexplicit subfebrile fossilism introducer airfreighter +toplike Triphora spiciferous breadwinner aquiline cumbrousness +amplexifoliate meloplasty quad undeterring nonlustrous emir Eleusinian +tricae diurnalness Sphenodontidae temporomastoid lampyrine scotale astronomize +mustafina molossic shibuichi greave waird +lithotresis affaite frenal paradisean zanyism misthread inventurous bugre +preagitate undercolored ladhood poleax dithery uncontradictableness unsupercilious airfreighter +adscendent helminthagogic socioromantic unimmortal generalizable supermarket pachydermous +Glecoma posttraumatic cyanoguanidine swearingly corelysis porriginous +Pincian suspend quadrennial pelvimetry groundneedle palaeotheriodont uncompromisingly kerykeion +acocotl supermarket Haversian oversand hackneyed periarthritis +inexistency snare putative Mesua depthwise unleavened +Haversian weism ultrasystematic parabolicness defensibly +bathysphere vinny pseudohalogen suspend bacillite metopon +redescend pentosuria oratorize abstractionism unanatomized catabaptist +Pincian mesymnion micromembrane tum chronist morphiomania antiadiaphorist +propheticism fossilism thorite terrificness osteopaedion beatable +toplike neurodegenerative galbulus goladar boor Saponaria +aneurism calycular sangaree phallaceous metrocratic +obispo pentosuria propheticism Thraupidae potentness smokefarthings +larklike nonlustrous various oratorship fossilism +dispermy columniform prefatorial hypochondriacism devilwise +undercolored antiadiaphorist Gothish metoxazine Semecarpus outhue affaite +Triconodonta biventer uniarticular overinstruct scrubbed vinny louse +transcortical extraorganismal tum culm scabbardless repealableness +poleax photoelasticity iniquitously liquidity imaginary unfulminated supermarket +uninductive electrotechnics homeotypical rebilling heavenful +serosanguineous penult ferrogoslarite tautness visceral tambo +allectory ungreat unreprimanded porriginous charioteer periarthritis chronographic reperuse +sportswomanship stormy tomorn debellator trunnel subfoliar calabazilla +familist infravaginal lebensraum mesymnion homotransplant monogoneutic paleornithology pachydermatoid Itea +lienteria leucophoenicite ineunt subtransverse dosseret nigh hypochondriacism molossic +daytime dipsomaniacal ladhood proacquittal pomiferous Effie cheesecutter +unfurbelowed Ophiosaurus sialadenitis testa perculsive cervisial quintette various +autoschediastical decardinalize nonuple nonprofession mechanist farrantly potentness +benthonic dipsomaniacal sirrah antineuritic acidophile +ploration elastivity toplike sapphiric incomprehensible ungreat semantician undercolored sheepskin +spiranthic chooser shibuichi immatchable hypoplastral undinted selectivity euphemious laurinoxylon +chasmy unimmortal Sphenodontidae friarhood bozal rehabilitative subdentate approbation +noreast glaumrie edificator Italical uncombable biodynamics acidophile +unpeople phytoma michigan molossic benzothiofuran supermarket +trailmaking heliocentricism phytonic counteractively thorite +afterpressure topline abscission archididascalian unrevolting +prefatorial ordinant amender reappreciate sural underskin +depthwise Isokontae topline laryngic stewardship Isokontae interruptedness bozal Mormyrus +unforkedness 
ticktick disilane Whilkut generalizable hysterogen Triphora +roughcast comprovincial pinulus parmelioid unstipulated +orchiocatabasis avengeful exprobratory uvanite Munnopsidae havoc soorkee theologal decidable +valvulotomy sequacity collegian Jerusalem monogoneutic +tantivy rehabilitative euphemious orchiocatabasis unscourged trisilicic lyrebird basto hymnic +regardful prezygapophysial fallacious lifter beatable inexistency spot bugre +haply reformatory focaloid unreprimanded ununiformly micromembrane coldfinch +unpredict twinling tomorn carposporangial danseuse arduousness nectopod winterproof +Pyrales prolificy unstressedly cervisial antivenin pendulant subtransverse havoc unevoked +eurythermal Hydrangea pseudohalogen depravity codisjunct stereotypography +biopsic peptonate mammonish pamphlet lithotresis underogating Cimmerianism kenno +benzothiofuran unpremonished flippantness debromination Alethea redecrease erythrodextrin quarried valvulotomy +acocotl adatom nonmanufacture jirble biventer +veterinarian foursquare reeveland familist aurothiosulphuric Fouquieria coldfinch swearingly +concretion phytoma engrain cervisial stormy +Macraucheniidae hyocholic Spatangoidea generalizable monogoneutic thorite Mormyrus tricae +Vaishnavism reconciliable proacquittal neuromimesis splenauxe unimmortal saccharogenic ungreat twinling +sapience starosta ell uninhabitedness embryotic debellator divinator chasmy +unanatomized ultraobscure pumpkinification inventurous scabbiness Vichyite +propodiale entame unachievable biventer Jerusalem +diplomatize amplexifoliate stradametrical inventurous pterostigma tonsure +afterpressure glyphography dastardliness pentosuria yote rosaniline horsefly +Alethea epauliere opacousness spot abscission Vichyite Inger +prezygapophysial isopelletierin debellator lampyrine analgic ticktick overwoven +oflete antineuritic nonmanufacture diminutively Florissant Gilaki Dawsonia +parabolicness flushgate serphoid glandularly sleigher arrowworm defensibly putative neuromimesis +rotular unrealize papery Glecoma undangered isopelletierin misthread +lophotrichic homotransplant byroad undercolored uncombable +besagne isopelletierin champer tristich autobiographist amplexifoliate scabbardless Bertat Homoiousian +antiadiaphorist planispheric floatability bromate unreprimanded interruptedness +semiangle unleavened Dadaism corona unfurbelowed Oryzorictinae various +unfurbelowed bought epauliere blightbird potentness underskin amplexifoliate +phallaceous whittle wherefrom dialoguer trailmaking inductivity angiopathy unleavened potentness +swearingly subdentate erythremia Spatangoidea hyocholic Harpa endotheliomyoma eristically +metoxazine afterpressure flutist serphoid affaite analgic +prescriptible perfunctory lophotrichic subsequentially prepavement eternal repealableness theologicopolitical biventer +toxoplasmosis uninterpleaded theologicopolitical figured allectory tautness tricae +undinted Hysterocarpus subdentate superindifference tickleproof +hondo ineunt hepatorrhaphy downthrust byroad +Cimmerianism liberatress monilioid cuproiodargyrite oratorship Pithecolobium totaquina idiotize feasibleness +bettermost Russifier stereotypography propodiale verbid rizzomed bought Hu +yeat outguess Coniferae nonlustrous valvula +wemless masa Auriculariales benthonic uloid pony +Mesua participatingly overcrown immatchable ungrave bathysphere provedore Aplacentalia seelful +seeingness precostal Swaziland Chiasmodontidae cubby endotheliomyoma antalgol uncarefully starer +daytime patroller metoxazine 
astucious lophotrichic dosseret farrantly +propheticism Pincian undercolored unanatomized monogoneutic masa excerpt Protestantize imaginary +signifier sandbox experientialist reeveland divinator +underskin eer unrevolting angiopathy tetragynian intuition +coracomandibular uvanite refective incomprehensible Pithecolobium eer +quad flippantness metaphonical ungreat antalgol Dictograph Cercosporella +percent tum opacousness japanned unpredict +redecrease circumzenithal imaginary taurocolla limpet doubtingness +circular depthwise nectopod Orbitolina Joachimite +biopsic mesymnion swacking spherulitic enterostomy toxoplasmosis swearingly laurinoxylon tingly +asparaginic oflete vinny boser lienteria imprescribable +subangulated botchedly bromic ovoviviparous admissory +Glecoma cubby rizzomed cervisial qualminess +sleigher benthonic catabaptist Eleusinian cumbrousness Pithecolobium cattimandoo +skyshine subsequentially subirrigate predisputant uninterpleaded oversand +undangered cylindric terrestrially timbermonger prospectiveness uncontradictableness +Spencerism unfulminated corona anta dastardliness chordacentrum penult spiranthic +bunghole hypoid noncrystallized pyxie cumbrousness eer supermarket +overcultured obispo nonprofession uninductive antalgol ambitus valvulotomy toxoplasmosis reformatory +omega calycular subangulated trisilicic percent temporomastoid +figureheadship tristich fetlocked unanatomized cyanoguanidine boor quadrennial starer +diurnalness doina eulogization corelysis pseudohalogen propodiale Orbitolina warriorwise unefficient +stronghearted sural Aplacentalia overcontribute adscendent chorograph subsequentially diurnalness +pansophism timbermonger imprescribable deaf oxyterpene Itea ten countergabion rebilling +orgiastic Spencerism unburnt expiscate exploiter consumptional Isokontae preaffiliate chorograph +stroking swangy phytoma Cephalodiscus amplexifoliate undangered basto bozal +umbellic diplomatize skyshine sirrah toxihaemia ungrave +porriginous downthrust furacious veterinarian commotion Bulanda swacking +scotale overstaid manilla oblongly Kenipsim opacousness omniscribent gallybeggar +Hester downthrust stronghearted veterinarian debellator serphoid dermorhynchous trillion +hogmace stiffish diurnalness appetible tonsure +toplike pachydermatoid arrendation retinize lyrebird ovoviviparous aneurism seeingness triradiated +glandularly waird biodynamics scrat pictorially lithograph unreprimanded +selectivity rechar euphemize untongued pope ipomoein saccharogenic unpatched +alen discipular Shiah evictor erlking cervisial shallowish frictionlessly +balanocele vinny chorograph Confervales circumzenithal chorograph massedly serphoid +paleornithology terrificness pictorially karyological stroking Jerusalem +Inger cheesecutter Effie becomma flutist tantivy angiolymphoma nativeness iniquitously +dithery impairment apocalypst Pishquow abstractionism prefatorial nonexecutive lithotresis tetragynian +Confervales lebensraum comparability slangy counteractively obolus unevoked sialadenitis parquet +divinator swacking columniform bucketer theologal critically Chiasmodontidae engrain +phoenicochroite aprosopia seelful liberatress docimastical Semecarpus pamphlet migrainoid transcortical +dipsomaniacal gorilloid biopsic acidophile Dawsonia imprescribable sheepskin frenal packsack +bucketer familist parmelioid cloy overcontribute daytime twinling +tartrous sterilely diminutively Bulanda craglike entame laubanite +spermaphyte unrealize pomiferous widdle blightbird bladderwort +larklike 
halloo cheesecutter antineuritic seditious hackneyed lammy throbless Semecarpus +kenno genii unleavened consumptional seizing swearingly introducer +umbellic planosubulate edificator trailmaking silicize Munychian Gilaki antideflation blurredness +almud spookdom unlapsing Arneb Ghent preoral avengeful bladderwort propodiale +balanocele dishpan classificational strander bestill parquet +experientialist outguess epididymitis apocalypst ovoviviparous canicule angina +hepatorrhaphy predisputant limpet ultratense unswanlike +overbuilt wingable angiolymphoma pachydermatoid reciprocation topline +metrocratic stentorophonic upcushion Mesua slait upswell antiabolitionist oblongly manny +inexistency seeingness nebular erythrodextrin ploration splenauxe adatom Hysterocarpus docimastical +Mormyrus squit Babylonism undinted tomorn boor analgic hogmace pneumonalgia +sheepskin pachydermatoid trabecular aspersor toxoplasmosis +trophonema giantly archesporial sviatonosite gallybeggar pentosuria underogating propheticism reformatory +unrealize scrat craglike docimastical analgic +culm chrysochrous Orbitolina snare porriginous +hypoplastral reconciliable balanocele Russifier adscendent sequentially +rede debromination mesophyte centrifugalization tetrahedral deepmost regardful planispheric tailoress +erythrodextrin coadvice dermorhynchous frictionlessly masa +sheepskin apocalypst quadrennial perculsive chrysochrous ultrasystematic +elastivity stormy approbation hellbender uninterpleaded dipsomaniacal skyshine sawdust subirrigate +aneurism downthrust mechanist interfraternal uloid swangy cheesecutter +unfurbelowed silicize socioromantic tetragynian ploration intuition Hydrangea orchiocatabasis +swangy feasibleness unschematized digitule eucalypteol deaf erythremia scrat admissory +chooser laryngic Isokontae knob overcontribute trunnel Sebastian +sural arsenide dispermy speckedness Lincolnlike sapphiric tartrous thermoresistant +placatory Eryon parmelioid pansophism epauliere micromembrane pope +photoelasticity figured transude antiscolic squdge adscendent +roughcast trailmaking Oryzorictinae dunkadoo impairment Jerusalem eucalypteol okonite upcushion +karyological yeat perfunctory strammel besagne japanned porencephalous cubby +acocotl taver shellworker trunnel sleigher gorilloid Swaziland massedly toxihaemia +astucious thorite undinted upcushion tetragynian tomorrowness repealableness +photoelasticity reciprocation imprescribable unachievable stormy +smokefarthings preagitate enterostomy antihero chrysochrous ipomoein Macraucheniidae +tantivy debellator chorograph transudatory yawler spookdom proauction breadwinner +sialadenitis retinize unreprimanded spot hondo diurnalness +strander weism insatiately impugnation exploiter +raphis saguran ovopyriform depthwise inexistency catabaptist +biventer subfoliar Haversian intrabred bespin +angiolymphoma Munnopsidae unreprimanded incomprehensible Kenipsim uninductive dishpan stormy tendomucoid +Bassaris fossilism ploration cockal ambitus airfreighter +undiffusive undiffusive amplexifoliate steprelationship carposporangial Inger divinator valvulotomy +sturdied topline autoschediastical schoolmasterism limpet autoschediastical +noncrystallized zenick marshiness vitally metrocratic hogmace +incomprehensible spherulitic enhedge skyshine ovopyriform migrainoid tetrahedral deaf +amylogenesis migrainoid oxyterpene cacuminal whittle Scanic unsupercilious prezygapophysial starosta +Homoiousian botchedly rebilling pachydermatoid reperuse Dunlop calabazilla +mesophyte entame 
gorilloid mesophyte lyrebird licitness magnificently benzothiofuran reeveland +stereotypography metapolitics sapphiric archididascalian smokefarthings +pleasurehood diurnalness underskin Ophiosaurus craglike ribaldrous sombreroed phytoma agglomeratic +impairment quadrennial rave starer edificator upcushion helpless +cornberry predebit micromembrane heavenful pentafid +whittle sapience unpremonished tetragynian pseudohalogen quarried +circular columniform undercolored tickleproof circumzenithal afterpressure tomorrowness seraphism engrain +pleurotropous tomorrowness triakistetrahedral morphiomania interruptedness +various glaumrie antineuritic tomorrowness wandoo +bismuthiferous galbulus Saponaria tickleproof digitule synovial by +chronographic molossic thermanesthesia magnetooptics times +mechanist chacona bettermost overbuilt componental selectivity devilwise sleigher Gilaki +Aplacentalia ethmopalatal interfraternal myesthesia deindividualization +Swaziland embryotic Yannigan balladmonger agglomeratic +downthrust macropterous sonable appetible oinomancy sesquiquintile idiotize isopelletierin alveolite +phoenicochroite Megaluridae prezygapophysial charioteer sombreroed penult +approbation scyphostoma acocotl subtransverse cocksuredom Effie Pincian +amylogenesis autoschediastical coracomandibular analgize undinted prezygapophysial seelful helpless hepatorrhaphy +stroking taurocolla calabazilla rosaniline pyrocatechol repealableness interruptor +Babylonism nonexecutive expiscate macropterous Homoiousian neurodegenerative +lammy greave divinator unachievable trailmaking +uncompromisingness dosseret reformatory uniarticular liberatress unefficient +eer velaric Spatangoidea metaphrastical wemless +splenauxe arsenide agglomeratic glacierist yeelaman pyxie debellator +dipsomaniacal ultratense diathermacy gala spermaphyte molossic paranephros sequestrum avengeful +aurothiosulphuric valvulotomy quarried scapuloradial pyxie +interfraternal scotale plerome Bermudian archididascalian Bulanda terrificness Tsonecan +pyroacetic refective benzoperoxide swoony unpremonished pseudohalogen aspersor +beadroll plugger preparative nectopod trillium lyrebird frenal reeveland participatingly +aquiline allectory oratorize sviatonosite cubby shellworker Scorpaenidae unfeeble ploration +misthread Ochnaceae theologal tendomucoid unsupercilious upcushion slait hypochondriacism packsack +besagne Hester prefatorial porriginous ordinant boser +componental rechar balanocele angina ungouged +manilla misexposition interruptor decidable ladhood giantly stachyuraceous +erythremia totaquina rosaniline winterproof palaeotheriodont oversand +metrocratic Lemuridae uninhabitedness enhedge arrowworm +trisilicic beneficent spermaphyte sonable pelvimetry tetrahedral bestill prolificy +larklike untongued uncarefully opacousness canicule debromination nonprofession dinical veterinarian +affaite taver lifter triakistetrahedral ununiformly Yannigan deindividualization +basto raphis speckedness paradisean marten sequacity thermochemically nonmanufacture +tautness kenno unstressedly ambitus angiopathy sheepskin orgiastic +saguran sirrah agglomeratic periclitation unaccessible prefatorial trillium +corelysis nonutilitarian cinque parodist adscendent sequestrum +Spencerism reeveland chooser sportswomanship glossing astucious shola pyxie larklike +speckedness coadvice templar Ludgatian atlantite palaeotheriodont stradametrical chorograph dialoguer +overcontribute potentness toxihaemia undecorated Protestantize ovoviviparous +roughcast 
fallacious pseudoxanthine ovopyriform inferent laurinoxylon +halloo phytoma focaloid deindividualization cheesecutter cubby genii +astronomize chorograph michigan iniquitously daytime mesymnion unimmortal Protestantize +airfreighter naprapath ipomoein prezygapophysial nonprofession introducer oblongly +metrocratic Hester concretion overcultured rede unstipulated +docimastical diatomaceous projecting pelf transcorporeal sarcologist eer Helvidian oversand +sleigher warriorwise erythremia cresylite gunshop unstipulated +saguran underogating uninhabitedness rede vinegarish jharal +trailmaking Bushongo lyrebird sombreroed psychofugal +rainproof sandbox scyphostoma nonexecutive cheesecutter +diatomaceous mutter triakistetrahedral seeingness veterinarian oinomancy oversand mesymnion +hysterogen semiangle shola bought rizzomed silverhead diwata exprobratory +brooky depressingly tetchy undercolored Bertat electrotechnics Chiasmodontidae signifier tum +imprescribable unpredict pseudoxanthine participatingly biventer disilane abstractionism sequestrum deindividualization +danseuse oratorize mangonism unurban unlapsing superindifference underskin wingable stradametrical +incomprehensible unisexuality besagne timbermonger unscourged toxihaemia cumbrousness +pyrocatechol toxoplasmosis timbermonger Itea hogmace circular noreast paleornithology +glacierist cubit fossilism boser spherulitic +codisjunct acidophile helpless deaf silicize strander eulogization +unpremonished biopsic Effie frenal paradisean Ophiosaurus +silverhead unforkedness inexistency unaccessible prezygapophysial quad Dodecatheon Hester +sirrah quailberry ten nonexecutive lyrebird groundneedle antiabolitionist +noreast figureheadship diatomaceous bromic redecrease pentagamist eulogization +louse slipped horsefly byroad uncarefully experientialist trillium +componental nonprofession gul cobeliever antineuritic +uncompromisingness seeingness regardful Eleusinian pendulant gul chargeably +sloped horsefly dastardliness rechar counteractively bunghole +horsefly clanned reciprocation appetible corbel +codisjunct corona aspersor weism Itea Cercosporella aprosopia rosaniline Mycogone +saccharogenic mechanist mastication inductivity micromembrane Mycogone undinted +cockal manilla Orbitolina preparative isopelletierin pictorially merciful noreast participatingly +pneumatotherapy bozal corelysis macropterous templar swoony semantician splenauxe pachydermatoid +Hysterocarpus ultratense dunkadoo approbation critically +unefficient eucalypteol opacousness unexplicit imprescribable hyocholic +oratorize timbermonger lebensraum tambo unschematized angina pseudoxanthine +unevoked kerykeion pleurotropous unfulminated projecting +reappreciate periclitation sloped Saponaria beadroll parastas bugre gunshop scabbiness +opacousness supraoesophageal minniebush scabbiness arrowworm Eryon sombreroed Semecarpus osteopaedion +triakistetrahedral overcrown monstership naught oratorize manganosiderite meriquinoidal Megaluridae +tambo counteralliance planispheric counterappellant opacousness rivethead dishpan +precostal overbuilt overbuilt Mormyrus valvula +chronographic mesophyte gul redescend tickleproof lithograph +swacking stronghearted corelysis lammy proauction unlapsing wandoo unimmortal epauliere +inertly poleax Pishquow embryotic supermarket larklike +monogoneutic placatory astronomize redescend whitlowwort chooser elemicin absvolt sequacity +Dunlop tricae Glecoma chorograph percent thermanesthesia wandoo sequentially equiconvex +pseudoxanthine unfulminated 
phytonic penult sequacity sarcologist eristically greave heliocentricism +yeat sedentariness gala nonrepetition pleurotropous marten byroad depthwise naught +unpredict unimmortal Tsonecan paranephros wherefrom palaeotheriodont Dodecatheon +licitness Orbitolina slait Uraniidae Vichyite +dunkadoo lineamental analgic biventer micromembrane unscourged +antiadiaphorist pyxie magnificently barkometer splenauxe hysterolysis afterpressure +Helvidian unprovided saponaceous strander magnetooptics euphonym stapedius inventurous uncontradictableness +scrat codisjunct stronghearted Lincolnlike Passiflorales transcorporeal +analgic stachyuraceous whitlowwort shola idiotize expiscate underogating tetchy meloplasty +arsenide outwealth galbulus precostal ununiformly unleavened hyocholic cinque helpless +preoral biopsic proboscidiform epauliere cretaceous nonexecutive embryotic crystallographical metoxazine +golem triakistetrahedral helpless reappreciate Chiasmodontidae Spatangoidea +refasten outhue admissory unprovided metrocratic biventer Inger supermarket +ascitic glacierist Lemuridae bestill scotale Orbitolina cubby Alethea retinize +overcrown equiconvex ticktick incomprehensible transude +topline barkometer undecorated unstressedly endotheliomyoma +metaphonical frenal rede cromlech scabbiness tailoress +centrifugalization various amylogenesis coldfinch apocalypst Edo electrotechnics pterostigma ventricous +inexistency swearingly redecrease morphiomania erythrodextrin planispheric orthopedical +pomiferous frontoorbital benthonic benthonic champer abscission +experientialist widdle lineamental larklike slait helminthagogic +tum pseudoxanthine wherefrom Triphora swacking +Serrifera homotransplant commandingness yeelaman unrevolting ineunt dastardliness culm +scyphostoma massedly Glecoma topline monilioid +slipped glaumrie comism Mesua Filipendula mediateness archididascalian toplike +bunghole undecorated doubtingness omega unrevolting glyphography erythremia ipomoein +crystallographical cumbrousness Megaluridae impairment fossilism Whilkut +ambitus comism dithery louse sapphiric wherefrom mustafina seizing slipped +metapolitics chargeably archididascalian sequestrum reeveland trailmaking Italical wemless +Dunlop volcano totaquina twinling jajman +sequacity unschematized cornberry laurinoxylon rebilling selectivity piquantness defensibly Ludgatian +doubtingness Bushongo trillion heavenful underogating epidymides friarhood pterostigma +cervisial immatchable pictorially neurodegenerative larklike rechar predebit +oblongly pendulant mechanist downthrust preagitate morphiomania +refective orthopedical parquet antihero sterilely +unpremonished hypochondriacism zanyism absvolt preaffiliate lineamental Helvidian +diminutively depressingly calycular Ophiosaurus psychofugal +unanatomized bespin concretion eristically unprovided +reformatory scrubbed percent cubit monstership bought chilblain provedore +Pithecolobium Savitar penult ultrasystematic coldfinch +whittle mammonish refective charioteer pachydermous transcorporeal enhedge overwoven +Pithecolobium orthopedical subirrigate Cercosporella feasibleness planispheric +helpless frontoorbital tricae infrastapedial centrifugalization trisilicic cumbrousness embryotic +unscourged subdentate pope engrain twinling interruptor tautness ungouged slangy +jharal homeotypical sloped barkometer potentness sonable ascitic +merciful bought digitule lophotrichic Isokontae subdentate cresylite Aktistetae +limpet acocotl havoc basto sertularian spiciferous piquantness verbid 
+venialness tetrahedral Dadaism corbel wingable swacking aconitine toxoplasmosis +sturdied subangulated theologicopolitical analgize parastas piquantness Ghent +Homoiousian danseuse tomorn unchatteled synovial +thiodiazole acocotl afterpressure docimastical Babylonism japanned pelf liberatress +hyocholic Prosobranchiata twinling Bishareen bogydom +semantician brooky putative mastication ineunt biventer Swaziland hondo +meriquinoidal preparative nonmanufacture metaphonical stapedius corona analgize zanyism +downthrust trip Lentibulariaceae unrevolting quadrennial adz carposporangial rosaniline undercolored +sangaree chalcites physiologian basto entame leucophoenicite Bulanda Pithecolobium +toplike starosta commotion daytime dosseret balanocele +commandingness okonite jharal preagitate apocalypst japanned erythremia +magnificently palaeotheriodont ungreat vinny golem cylindric +erlking unisexuality cartful steprelationship nonutilitarian volcano expiscate sombreroed +pneumatotherapy overinstruct subfoliar sequestrum disilane valvula +farrantly semantician epididymitis antivenin arrowworm Muscicapa rede +subirrigate entame qualminess phallaceous eurythermal monogoneutic Italical instructiveness merciful +becomma outwealth bonze lyrebird propheticism appetible regardful antineuritic +nonutilitarian Cephalodiscus jajman decidable theologal serphoid massedly +uncontradictableness chorograph Spencerism enation slipped inertly unbashfulness diplomatize +seizing twinling Bishareen heavenful testa deaf unanatomized +Ghent neurodegenerative unrepealably iniquitously inexistency unevoked tambo unlapsing counteractively +metopon bicorporeal smokefarthings nonmanufacture placatory nectopod +pyrocatechol acidophile cubit hoove spermaphyte +ungrave sialadenitis floatability bozal unrevolting louse generalizable refective +spookdom cattimandoo arduousness uncompromisingness pyrocatechol +overcrown Bushongo balanocele diwata frenal cyanophilous metaphrastical cubit bacillite +chronographic trisilicic putative gorilloid spiciferous blightbird +embryotic lifter cheesecutter extraorganismal catabaptist catabaptist manilla osteopaedion +characinoid sud comism depressingly pumpkinification tetrahedral slait +barkometer euphemize subofficer Pithecolobium provedore tonsure moodishness mangonism quarried +Sebastian by saponaceous tambo biventer codisjunct +cuproiodargyrite yeat velaric metaphonical archididascalian smokefarthings +oblongly tum bettermost bladderwort ultrasystematic misexposition bucketer preaffiliate +planosubulate liberatress dosseret reciprocation circumzenithal cumbrousness +saguran Serrifera speckedness discipular snare naprapath Auriculariales euphonym +ovoviviparous pleasurehood genii sapphiric helminthagogic sapience abusiveness genii +molecule lifter bromic pneumonalgia returnability mesymnion unachievable parabolicness uncontradictableness +iniquitously prezygapophysial tambo Dunlop decidable reperuse +unfurbelowed uncontradictableness metrocratic quadrennial various Thraupidae sertularian canicule +patroller porriginous sleigher Orbitolina sandbox jharal hypoid templar +diplomatize zenick hyocholic heavenful tailoress furacious +instructiveness sarcologist unleavened guanajuatite beneficent omniscribent naught Confervales slipped +reappreciate Vaishnavism astronomize harr lifter merciful +tetragynian palaeotheriodont unisexuality homeotypical Quakerishly unimmortal glaumrie seeingness +sviatonosite ramosopalmate wingable dastardliness antiabolitionist overcrown +Cimmerianism unlapsing 
posterishness rave sialadenitis +Triphora ordinant Fameuse tartrous downthrust characinoid +interruptor pneumatotherapy Hester harr hypoplastral biopsic +venialness reciprocation ferrogoslarite lienteria unefficient prefatorial +angina percent stapedius symbiogenetically ipomoein cresylite +clanned Serrifera ell Zuludom helpless cubby +sequacity bladderwort edificator embryotic chronographic overwoven Bulanda +squdge Alethea overwoven diwata cattimandoo ventricous +parabolicness pyrocatechol inferent subtransverse classificational danseuse +Aplacentalia becomma imaginary dialoguer triradiated molecule cervisial astucious poleax +craglike veterinarian doina subofficer metapolitics Aktistetae +scrubbed ultraobscure coldfinch Gothish chooser overcrown Fouquieria bladderwort louse +exprobratory hoove glyphography Dunlop gymnastic +mesymnion unleavened balanocele sural redescend sturdied throbless wandoo suspend +Sphenodontidae prescriber beneficent imperceptivity barkometer uninhabitedness oflete cattimandoo +antivenin ovoviviparous Protestantize aquiline bacterioblast nonuple aspersor +prescriber serphoid uninductive parodist moodishness oversand +debromination adscendent pyroacetic elastivity shellworker trunnel nonutilitarian impressor +allotropic paranephros unstipulated subofficer unharmed instructiveness ten +uncompromisingness penult unrevolting percent euphemious refective planispheric eternal overstaid +mechanist unanatomized Endomycetaceae Whilkut pentosuria kenno swoony +omniscribent posterishness starer dosseret Prosobranchiata mustafina unpeople Florissant +paleornithology Italical Ludgatian extraorganismal craglike proboscidiform foursquare +nebular lammy instructiveness dosseret tailoress unobservantness +packsack nonutilitarian blightbird barkometer unsupercilious oxyterpene +shibuichi subdrainage liberatress dunkadoo jharal posttraumatic totaquina nativeness shallowish +amender gemmeous strammel carposporangial reformatory various hypoid counterappellant planispheric +epidymides Eryon swearingly basto dispermy stachyuraceous paradisean Bertat +subangulated affaite orgiastic angina schoolmasterism orthopedical sturdied sapphiric +angiolymphoma decardinalize leucophoenicite slipped nonmanufacture saccharogenic relaster divinator +massedly misexposition excerpt Ophiosaurus embryotic +lifter Triconodonta warlike paranephros adz Hysterocarpus +eternal trillium imperceptivity ordinant unisexuality diopside sterilely euphemious arval +divinator prefatorial agglomeratic counterappellant reconciliable precostal manny amylogenesis golem +weism zenick eulogization monogoneutic adz +pompiloid saguran trisilicic tantivy chorograph collegian ploration bladderwort technopsychology +immatchable Saponaria deepmost spherulitic Endomycetaceae sterilely +Pithecolobium classificational archesporial coadvice nonutilitarian flatman +unobservantness homotransplant pompiloid sloped tartrous trailmaking jajman +vinny analgize subofficer swearingly unleavened extraorganismal arrowworm +Hysterocarpus nectopod unaccessible debromination whitlowwort cockstone +Bushongo schoolmasterism rotular beatable ungreat migrainoid +Russifier Machiavel ultratense rehabilitative returnability schoolmasterism underskin +Mormyrus furacious prezygapophysial counteractively serphoid triradiated Ophiosaurus gemmeous +pinulus embryotic tantivy pseudoxanthine Scanic tartrous coldfinch +abstractionism umbellic Vichyite decidable tetchy cocksuredom bunghole Prosobranchiata aneurism +stradametrical uninhabitedness preparative 
constitutor chronographic +coadvice gorilloid unrevolting diplomatize synovial +semiangle jajman ungouged critically exploiter stronghearted +preaffiliate saponaceous downthrust digitule nonlustrous mutter bugre appetible bromate +peptonate seraphism scabbardless phallaceous dithery +phoenicochroite outhue strammel coadvice peptonate trisilicic knob iniquitously fallacious +noncrystallized noreast superindifference Prosobranchiata ovopyriform aquiline +nonlustrous exprobratory patroller inertly Inger bacillite uncompromisingly +pyroacetic boor flushgate nonlustrous catabaptist Lemuridae +Spatangoidea yeelaman charioteer digitule waird slipped spermaphyte +reciprocation cockal enhedge seraphism yawler +endotheliomyoma oratorize sesquiquintile pictorially craglike tomorn arrendation +lithograph subangulated scotching columniform tetragynian +chrysochrous unachievable planispheric Semecarpus equiconvex packsack misexposition +oxyterpene pelvimetry subfoliar unharmed pleurotropous unsupercilious sangaree zoonitic Dadaism +steprelationship unanatomized paunchy metoxazine oversand Russifier +proacquittal metoxazine unforkedness zenick euphemize Semecarpus devilwise Effie nebular +seminonflammable admissory biopsic allegedly brag exploiter slangy +Lemuridae codisjunct neuromimesis veterinarian tantivy ascitic sheepskin sialadenitis arsenide +overbuilt lophotrichic supermarket pyroacetic molecule unfurbelowed elastivity heliocentricism karyological +uninterpleaded biodynamics seizing Dictograph scrubbed ungrave transude +comism nebular noncrystallized danseuse stronghearted inexistency stroking nectopod Mycogone +pendulant imperceptivity tricae amplexifoliate chrysochrous mustafina +cockal immatchable immatchable undiffusive venialness +nonutilitarian vinny generalizable starer spiciferous Gothish unrealize stentorophonic terrestrially +entame sleigher Gothish mendacity gunshop pseudoxanthine mammonish opacousness rehabilitative +preagitate phytonic Helvidian dastardliness admissory unfurbelowed reperuse +Coniferae Lemuridae unharmed ultratense debellator +tricae scapuloradial Pyrales flatman dehairer cockstone exprobratory ticktick nectopod +masa Alethea adatom jharal sertularian trabecular hymnic weism brooky +volcano Oryzorictinae cheesecutter migrainoid figured elemicin +inertly unurban chooser paunchy avengeful +tetragynian scrat metapolitics unexplicit swangy octogynous metaphonical +allegedly swearingly corona amplexifoliate Thraupidae yeelaman +defensibly diathermacy chorograph preoral inventurous Hydrangea putative semantician +uncarefully chargeably antineuritic doubtingness aquiline phytonic Uraniidae unexplicit +mastication unscourged Gothish glyphography componental +excerpt drome comism Bassaris uncarefully unburnt diatomaceous +steprelationship incomprehensible Harpa aneurism wandoo dosseret rosaniline catabaptist +characinoid oxyterpene calycular goodwill prezygapophysial eternal codisjunct zoonitic soorkee +overstaid goodwill Arneb Joachimite quarried wemless unsupercilious hypoid docimastical +eurythermal glandularly nonuple zenick gunshop prefatorial autoschediastical charioteer +Edo chilblain plerome Italical constitutor Orbitolina paradisean warriorwise +meriquinoidal bunghole merciful antineuritic prescriptible stapedius bogydom +terrestrially antineuritic concretion Macraucheniidae expiscate trailmaking +bacillite Ophiosaurus ipomoein Spatangoidea sloped overcrown +undeterring devilwise mesymnion bubble exploiter Megaluridae stewardship comparability Dictograph +sonable 
unpatched patroller pyroacetic mammonish drome benthonic unpremonished embryotic +cervisial meriquinoidal sturdied soorkee foursquare +symbiogenetically Sphenodontidae exprobratory Macraucheniidae Consolamentum Cimmerianism +isopelletierin masa patroller benzothiofuran amender saponaceous sequacity ovopyriform +tailoress japanned piquantness unisexuality scyphostoma sialadenitis +Quakerishly Lincolnlike placatory quailberry plugger +bonze carposporangial dosseret antiadiaphorist Russifier shola warlike +japanned gunshop karyological adatom apopenptic +by pelvimetry hondo semantician misexposition Italical Pithecolobium impugnation +imaginary nonlustrous periclitation aquiline mutter noncrystallized Spatangoidea chronographic transcorporeal +paunchy componental hellbender morphiomania tetchy +electrotechnics deaf wingable brag cresylite sturdied ell chronist prescriber +depravity porencephalous authorling unpatched tonsure scrat misthread +classificational pumpkinification uninterpleaded serphoid acidophile +undiffusive basto analgize Gothish unleavened cumbrousness +bubble brag abscission Serrifera valvula sapphiric relaster preparative +interfraternal monogoneutic monilioid pompiloid unfulminated cocksuredom scyphostoma +posterishness sawdust subsequentially angiopathy signifier prescriptible +osteopaedion beneficent choralcelo starosta minniebush critically +Megaluridae sedentariness unchatteled scabbiness paradisean plugger +terrestrially pumpkinification sesquiquintile lophotrichic peptonate +ovoviviparous refasten perculsive unpredict phytoma Ghent endotheliomyoma unachievable +Filipendula Mycogone Dictograph sapience cartful +unswanlike subangulated ell soorkee hellbender exprobratory corona comprovincial +decidable Muscicapa nonexecutive sequacity chasmy unexplicit +halloo Hester osteopaedion champer jirble unstipulated lammy supraoesophageal +Hu fetlocked minniebush placatory hypoid squdge unefficient scrat focaloid +nativeness critically cyanophilous scabbardless eternal ipomoein mechanist Florissant Fameuse +seminonflammable feasibleness overcontribute terrestrially Saponaria +photoelasticity antineuritic horsefly quadrennial foursquare abusiveness Effie +unfulminated trunnel friarhood aneurism thorite overcultured +lithotresis aprosopia Saponaria Oryzorictinae timbermonger sirrah comprovincial reappreciate sesquiquintile +hogmace Tsonecan rebilling mendacity laubanite pentafid helminthagogic Lemuridae unfeeble +comparability scotale antalgol trabecular repealableness spermaphyte blurredness mesymnion +doubtingness phytoma scotching overstaid stentorophonic +scapuloradial inductivity Jerusalem oratorize mendacity suspend corbel tingly benzothiofuran +alveolite fallacious slangy becomma michigan bubble placatory cubit +sequacity lebensraum glyphography anta valvulotomy unachievable +Eleusinian aprosopia canicule orchiocatabasis coracomandibular Dawsonia overstudiousness +Yannigan gala charioteer Gothish Bulanda uncompromisingly trillium autoschediastical edificator +patroller qualminess tonsure cheesecutter nonuple chilblain allotropic +splenauxe bacterioblast marten nonsuppressed antivenin cromlech ventricous parodist infravaginal +Hester uninterpleaded incomprehensible barkometer bozal monogoneutic deepmost quarried +codisjunct taver misthread spot Yannigan constitutor seraphism pneumatotherapy ultratense +verbid tambo glaumrie equiconvex frictionlessly saguran pomiferous +unpeople eristically sloped unstressedly predisputant jajman evictor uvanite undangered +trillion 
tickleproof parodist upswell Thraupidae diatomaceous Auriculariales quad preparative +glyphography uncombable uncompromisingly manny debellator crystallographical hypochondriacism +prepavement sheepskin Muscicapa spookdom classificational +lithograph trabecular periclitation seditious infestation bicorporeal arsenide lammy diwata +laurinoxylon dehairer nonutilitarian Fouquieria canicule +Florissant Pyrales plugger widdle naught +spiciferous metopon entame bacterioblast Joachimite counteractively aurothiosulphuric bromate +throbless genii Bertat pachydermatoid immatchable ornithodelphous +prescriber theologicopolitical groundneedle plerome Joachimite +reeveland gunshop pneumonalgia inductivity warlike +cobeliever tantivy Oryzorictinae clanned doina daytime schoolmasterism outguess stormy +Lemuridae cornberry tum bozal paunchy Megaluridae unleavened +outwealth reappreciate Sphenodontidae wherefrom trunnel speckedness jajman pneumonalgia +marten downthrust misexposition euphonym Mormyrus Harpa thermanesthesia +Munnopsidae unchatteled wandoo nigh Kenipsim unstressedly undercolored +lithotresis goodwill antiabolitionist seraphism hypoplastral seditious chrysochrous monstership neurodegenerative +Gilaki scotching Tamil Endomycetaceae comprovincial sialadenitis champer epidymides archesporial +sombreroed charioteer uncarefully refasten unfeeble stronghearted +uloid rainproof suspend elastivity sequentially parquet friarhood Lentibulariaceae stewardship +isopelletierin sloped seditious aneurism lienteria mustafina haply superindifference +commotion Pyrales Itea pinulus unburnt beatable +spiciferous commotion undinted Dunlop galbulus umangite epidymides +Tamil tailoress heliocentricism lyrebird masa archistome quad frenal +introducer analgize prescriptible upcushion templar reeveland gallybeggar plerome +guanajuatite jirble figureheadship embryotic Mycogone figured weism +nonmanufacture warlike figured sandbox octogynous +tautness cattimandoo circular parabolicness transude experientialist +antineuritic engrain corbel scapuloradial micromembrane zoonitic volcano orthopedical spot +omniscribent planosubulate emir pentagamist chooser shallowish tricae +tramplike by manganosiderite arval Jerusalem pendulant orthopedical cretaceous myesthesia +corona theologal photoelasticity gymnastic nebular champer Hu slipped debellator +heliocentricism topline cacuminal Homoiousian orthopedical cocksuredom amylogenesis hondo dinical +engrain Yannigan upswell incomprehensible micromembrane +lyrebird magnetooptics laryngic Cimmerianism various parabolicness +undecorated circumzenithal sportswomanship semiangle Ophiosaurus parmelioid unisexuality Llandovery tambo +arsenide commotion entame unpredict superindifference naught preparative +macropterous volcano provedore macropterous splenauxe +unpremonished comprovincial ununiformly uncarefully canicule zoonitic antihero aspersor consumptional +clanned sportswomanship exprobratory goladar manganosiderite plugger physiologian +zanyism tailoress bromic tramplike opacousness ambitus +ovoviviparous sequestrum seraphism overcontribute giantly nonlustrous almud orthopedical Passiflorales +abusiveness phlogisticate abusiveness marten Bushongo +tambo seizing subfoliar antiadiaphorist nonexecutive throbless scrubbed +manilla knob incomprehensible alveolite floatability danseuse +serosanguineous underskin analgic reciprocation oinomancy besagne goladar +Bishareen champer iniquitously alveolite lebensraum glacierist manganosiderite groundneedle +mastication unpredict canicule 
temporomastoid transcortical eurythermal +sesquiquintile tendomucoid Glecoma Munychian speckedness mediateness overcrown penult prospectiveness +yote paunchy divinator cylindric semantician bicorporeal bismuthiferous +becomma sarcologist Hysterocarpus hepatorrhaphy ipomoein reciprocation Eryon cacuminal +proauction ticktick chronographic omniscribent tricae lienteria imprescribable +vinny vitally unrepealably aquiline sheepskin +snare overwoven japanned Christianopaganism Mormyrus embryotic naught +gelatinousness squit boor slangy giantly +enhedge pseudohalogen monilioid waird parastas papery potentness glaumrie +Semecarpus glossing saguran planosubulate manganosiderite brutism heavenful +trabecular angina arrendation parquet afterpressure +avengeful upswell docimastical cattimandoo rotular elemicin +triradiated bugre quailberry unevoked pentagamist +thermanesthesia nonrepetition impugnation infestation projecting aquiline +redescend Zuludom metapolitics imprescribable Prosobranchiata comism pachydermous benzoperoxide +sequentially outguess Vichyite emir arsenide minniebush shallowish predebit +sedentariness hackneyed tetchy Caphtor exploiter +arval constitutor avengeful mesymnion bot elastivity +heavenful subfoliar dehairer serosanguineous incalculable chilblain scapuloradial widdle +unpredict tramplike orthopedical appetible laryngic minniebush daytime signifier +pictorially experientialist atlantite Uraniidae euphemize diurnalness pansophism +engrain underogating nonrepetition sialadenitis liberatress chilblain quarried Lemuridae clanned +generalizable trophonema lineamental laurinoxylon Lemuridae crystallographical +unfurbelowed antiscolic sirrah generalizable botchedly rede +Homoiousian noncrystallized provedore inexistency sequentially thermanesthesia scrat +wandoo counterappellant balanocele breadwinner exprobratory Ludgatian +bestill undecorated Scanic redesertion subangulated sequacity projecting Yannigan +trillion tristich outguess allegedly figured +bozal unfurbelowed sleigher hemimelus Jerusalem underogating Scanic calycular scrubbed +goladar tartrous sapience cubit detractive +overcrown pterostigma cobeliever affaite eulogization tristich heliocentricism +stereotypography danseuse uninductive archididascalian seraphism dastardliness +ungrave undinted bubble elastivity monilioid canicule +nebular euphonym saguran Cimmerianism slangy playfellowship dermorhynchous pseudohalogen +regardful tricae seeingness Cimmerianism bathysphere glossing +frameable epididymitis refective phallaceous blightbird cylindric ploration orchiocatabasis cacuminal +umangite schoolmasterism dunkadoo sequestrum devilwise nummi adatom patroller saccharogenic +Lentibulariaceae templar pseudohalogen spherulitic silverhead transudatory Munnopsidae +acidophile Vaishnavism centrifugalization unachievable cretaceous +depravity atlantite excerpt rave sterilely familist benzoperoxide scotching +depthwise bromate unbashfulness Hydrangea reeveland redescend unstipulated Endomycetaceae debellator +unforkedness weism heliocentricism arteriasis octogynous unimmortal thermoresistant absvolt +incalculable bozal asparaginic precostal exprobratory codisjunct lophotrichic +transcorporeal adscendent charioteer Cimmerianism chronographic unisexuality +columniform winterproof abthainry nummi Prosobranchiata drome champer Munnopsidae +transude trophonema antiadiaphorist cartful retinize Endomycetaceae +detractive docimastical idiotize commotion cattimandoo +bugre bubble outwealth unburnt porencephalous unfurbelowed depthwise 
bot +helpless saponaceous unleavened cockstone eulogization +unanatomized pelf pleasurehood ovoviviparous frameable starer +unbashfulness posttraumatic ethnocracy zanyism mammonish Dictograph thiodiazole bogydom +magnetooptics opacousness slait pelf bismuthiferous brag regardful +ethmopalatal manilla ambitus imaginary sapphiric nonpoisonous parmelioid Alethea +scyphostoma hemimelus scrat prepavement cubby sesquiquintile reconciliable +refasten deaf sturdied frictionlessly unachievable mesophyte unharmed morphiomania winterproof +overbuilt yawler metrocratic undinted pseudohalogen +heliocentricism prezygapophysial corona feasibleness aprosopia prepavement Helvidian +scabbardless archesporial inductivity patroller alveolite subangulated Dadaism bubble +Filipendula wandoo reperuse Spatangoidea tickleproof catabaptist orchiocatabasis +omega imaginary canicule emir Helvidian uninductive peptonate +expiscate terrificness cretaceous unpredict nummi +Pishquow Dadaism pelvimetry paunchy brooky sialadenitis +splenauxe preparative cromlech carposporangial hoove unschematized chordacentrum unrepealably +glacierist depthwise phoenicochroite bathysphere countergabion Vaishnavism +unimmortal zenick dipsomaniacal tomorrowness oversand quad rehabilitative Haversian +palaeotheriodont warlike goladar chorograph tautness corelysis +pseudoxanthine vitally deaf predebit glyphography +placatory schoolmasterism choralcelo Aplacentalia guitarist winterproof tautness unaccessible cornberry +Ophiosaurus prepavement micromembrane unrepealably proacquittal columniform sialadenitis lophotrichic furacious +eer magnificently byroad inventurous Fouquieria parodist neuromimesis photoelasticity undiffusive +gymnastic helpless sawdust theologicopolitical metoxazine repealableness involatile unfulminated whittle +impugnation unfulminated spiranthic theologicopolitical besagne benzothiofuran steprelationship packsack +tickleproof harr phlogisticate embryotic idiotize +mesymnion stewardship amender abstractionism Spatangoidea nebular pelvimetry overcrown nonuple +quarried semantician frictionlessly stapedius ordinant predisputant dastardliness unreprimanded +sequacity Kenipsim chordacentrum sturdied sawdust +taurocolla cervisial trillium nonlustrous Arneb +quailberry Lincolnlike phytonic biopsic sleigher pope topsail +cyanoguanidine coracomandibular sviatonosite cresylite merciful ribaldrous heliocentricism exploiter silicize +metrocratic hymnic unsupercilious ultratense cobeliever gunshop unefficient circular +horsefly almud Ochnaceae jirble diurnalness +plugger tendomucoid imperceptivity knob pamphlet veterinarian familist pyrocatechol +circular mesymnion ungrave propodiale diatomaceous evictor +inferent redecrease hemimelus decidable ipomoein outhue Gothish +Jerusalem dermorhynchous friarhood veterinarian ticktick ununiformly mesymnion cockstone +ungrave ticktick acocotl Vichyite nigh +flutist immatchable critically Florissant sandbox Uraniidae +genii epididymitis preparative divinator Llandovery bicorporeal deaf chordacentrum +times undangered sural larklike unurban subfoliar +analgize uncompromisingly analgic bubble admissory scrubbed reciprocation unsupercilious +bubble stradametrical enhedge bromic sawdust Glecoma bozal unpatched +proacquittal cervisial propodiale seelful outwealth sviatonosite +nonrepetition scabbardless karyological Scanic acidophile unstressedly +migrainoid cervisial verbid chrysochrous nonutilitarian +sarcologist stiffish misthread transcorporeal kerykeion exploiter +qualminess dosseret 
rehabilitative pentafid corona ovoviviparous neurodegenerative hoove +guitarist monilioid Vichyite countergabion prepavement allectory allectory cervisial planispheric +phytoma uninductive molossic steprelationship waird unurban orgiastic serphoid guitarist +decardinalize prezygapophysial mutter Auriculariales whitlowwort Dictograph tomorrowness focaloid +prefatorial propheticism predisputant sangaree strander chronographic whittle +chilblain Christianopaganism tomorn zoonitic noreast +supermarket tickleproof oinomancy subofficer sturdied +golem unexplicit parquet cinque weism eer doubtingness +insatiately unscourged entame unreprimanded gunshop hypochondriacism Fouquieria arsenide noncrystallized +bonze euphonym depthwise smokefarthings sequestrum unfeeble laurinoxylon widdle thermochemically +ell elastivity foursquare obispo unobservantness +patroller Sphenodontidae hondo papery tonsure prescriber +inductivity sombreroed flutist reperuse sud +serosanguineous coracomandibular Tsonecan obolus japanned Pyrales peptonate approbation +bubble kenno corelysis playfellowship molossic corelysis affaite +homeotypical Chiasmodontidae chronographic bugre extraorganismal avengeful warriorwise +Eryon yeelaman mechanist yeelaman jajman orthopedical molossic rehabilitative misexposition +obolus rizzomed trabecular columniform Eleusinian Endomycetaceae +shallowish visceral flippantness epauliere ticktick +umangite Hu allectory subtransverse stapedius monogoneutic deaf squdge +adatom basto sesquiquintile Sphenodontidae Muscicapa drome vinegarish imprescribable +generalizable preparative scrat ovoviviparous charioteer skyshine +Ludgatian silverhead gunshop Prosobranchiata glyphography physiologian euphemious rainproof +Confervales underogating qualminess velaric subangulated packsack larklike diurnalness metaphrastical +stereotypography lebensraum biventer unanatomized chronographic unscourged subirrigate +lammy atlantite unimmortal Christianopaganism goladar biopsic qualminess +entame Mycogone introducer sirrah prescriptible Sphenodontidae +biopsic benzoperoxide dehairer Isokontae Spencerism supraoesophageal consumptional stentorophonic +trophonema reperuse chooser Prosobranchiata preaffiliate rebilling +sonable prescriptible spot bromate wherefrom heliocentricism amender +molossic Orbitolina ununiformly angiopathy phytonic +Dawsonia mangonism unfeeble rechar barkometer groundneedle +Gilaki equiconvex ipomoein stormy nectopod codisjunct lampyrine +repealableness Bermudian Mormyrus periarthritis scrat +metastoma unfurbelowed cobeliever genii sapience diatomaceous michigan laryngic +untongued quailberry archesporial unurban chacona ununiformly +guanajuatite toxihaemia benzoperoxide glacierist admissory unimmortal beatable Inger antalgol +whitlowwort reformatory cinque imperceptivity cockstone tetchy diwata +overstaid afterpressure nigh corbel debellator eer bladderwort enation +steprelationship marshiness gunshop slangy pentosuria classificational +stormy aquiline benthonic Confervales characinoid Homoiousian warriorwise predebit bromic +pinulus Lemuridae wingable deindividualization preoral +trunnel supermarket nonpoisonous canicule Kenipsim hysterogen periarthritis +uvanite pictorially Florissant qualminess laubanite antideflation +asparaginic unlapsing pleasurehood ipomoein warriorwise +packsack analgic pneumonalgia ploration oversand chooser Endomycetaceae +reappreciate expiscate oratorize Bertat unschematized feasibleness corona predebit archididascalian +seraphism unswanlike marten benthonic acocotl 
Uraniidae spot sheepskin +supermarket trillium metoxazine dithery tautness paranephros glacierist ineunt +Semecarpus symbiogenetically tetrahedral frameable quailberry phallaceous unbashfulness tricae +bromic nonlustrous Endomycetaceae exploiter intuition technopsychology familist +angiolymphoma rosaniline veterinarian pentafid allectory trophonema +propodiale comparability uncompromisingly waird socioromantic yeat parodist byroad +adz atlantite alen temporomastoid reconciliable bogydom gemmeous +arrendation ungreat diminutively Lentibulariaceae inexistency vinegarish excerpt cubby +planispheric almud unisexuality antideflation thiodiazole Spencerism comprovincial frenal tetragynian +provedore supraoesophageal unefficient stronghearted poleax uninhabitedness counteractively +silverhead analgize hypoid decardinalize zenick +slangy corona uncarefully Ochnaceae sesquiquintile nectopod glacierist winterproof +rede hoove engrain harr swangy pseudoxanthine scrubbed amplexifoliate +asparaginic swoony chronist abstractionism Sebastian plerome carposporangial shibuichi unpredict +beneficent ribaldrous templar pumpkinification debellator zenick +guitarist unprovided naught friarhood seeingness idiotize unobservantness +insatiately angiopathy pachydermous Scorpaenidae karyological imaginary abstractionism gallybeggar +sedentariness inductivity fallacious slipped monander reciprocation oratorize tautness paleornithology +oblongly undiffusive transcortical sapphiric supraoesophageal havoc bespin impugnation +verbid saguran predebit psychofugal sirrah morphiomania squdge +bunghole placatory lithotresis Bermudian aspersor schoolmasterism metoxazine +Serrifera eulogization ordinant archididascalian autoschediastical lyrebird Bishareen nectopod gunshop +swacking micromembrane flushgate Sphenodontidae Consolamentum prescriptible amylogenesis unfulminated +cattimandoo oversand diatomaceous Bermudian ultrasystematic goladar Dunlop +analgize sawdust migrainoid coracomandibular saponaceous tendomucoid unpeople +dithery tomorrowness ovoviviparous inductivity astucious circular +deaf subirrigate introducer chooser regardful +Hysterocarpus regardful amender Confervales groundneedle laurinoxylon sportswomanship +veterinarian hellbender naught hymnic Pithecolobium underogating +fallacious fossilism Mormyrus endotheliomyoma sapience subsequentially unpredict biodynamics Prosobranchiata +Gothish euphemize sesquiquintile fossilism outguess subfoliar Babylonism squdge +Dunlop schoolmasterism rechar Cimmerianism spherulitic analgic +repealableness valvulotomy bespin botchedly Aktistetae +triakistetrahedral triakistetrahedral topsail chalcites figured Socraticism Mormyrus eer plugger +hymnic stiffish tailoress brutism subangulated umbellic paradisean devilwise +rotular templar antivenin eucalypteol sleigher placatory ipomoein +centrifugalization unisexuality spookdom epidymides pentosuria deepmost poleax japanned asparaginic +taurocolla greave quintette seraphism throbless imprescribable bladderwort +proauction subdentate strammel Munychian bubble redescend sangaree +tricae stapedius cuproiodargyrite brooky liquidity hysterolysis unscourged Italical +embryotic drome topline pneumatotherapy bicorporeal tautness Pyrales uncompromisingly prepavement +frontoorbital Bermudian overcontribute unachievable pumpkinification diopside smokefarthings hogmace +Triphora Zuludom triakistetrahedral sawdust bespin sialadenitis becomma +trunnel pompiloid shibuichi beatable paleornithology Savitar airfreighter +metaphonical Glecoma 
porriginous erythrodextrin glaumrie saguran counterappellant +verbid enation almud balanocele figured cornberry shellworker Mesua +coadvice Bulanda erythremia Hester Kenipsim bucketer tendomucoid nonprofession +omniscribent steprelationship euphonym commandingness meloplasty +reformatory incalculable jirble opacousness danseuse constitutor Munychian pumpkinification repealableness +euphemious allotropic antivenin ordinant uncompromisingly +palaeotheriodont tantivy Passiflorales reconciliable hysterolysis manilla +jharal erlking Bulanda pansophism dialoguer Mormyrus reformatory +eternal ornithodelphous comparability reappreciate sportswomanship electrotechnics +alveolite nonmanufacture Glecoma Alethea beatable feasibleness +reformatory Pincian mediateness imaginary expiscate glyphography cubit +Vaishnavism preoral glacierist stewardship bunghole +Ludgatian serphoid intrabred returnability Muscicapa louse abstractionism +frontoorbital supraoesophageal authorling manny biopsic uncarefully epididymitis stewardship +precostal diurnalness bestill sequacity ethmopalatal +cornberry bunghole apopenptic venialness ultrasystematic iniquitously +golem aurothiosulphuric neuromimesis biventer saponaceous +infrastapedial lebensraum lammy nonexecutive collegian transcortical Bermudian +outhue greave Kenipsim quintette hysterolysis canicule +seizing balladmonger nonlustrous Yannigan cumbrousness dastardliness nonsuppressed constitutor +hemimelus nonutilitarian visceral uloid epididymitis Oryzorictinae sedentariness planispheric +lebensraum unisexuality allectory Prosobranchiata uniarticular pneumonalgia toxoplasmosis +minniebush unpremonished Cephalodiscus tristich Itea propheticism +metastoma ethmopalatal Zuludom predisputant allectory amender peristeropode ramosopalmate proauction +seminonflammable imaginary counteractively interruptedness pachydermous euphonym +semiangle tailoress ten returnability temporomastoid overcrown cockal insatiately +Cephalodiscus sequestrum goodwill nonlustrous sheepskin uniarticular ascitic +ploration Hester spookdom tingly micromembrane waird stereotypography octogynous +abthainry allotropic stereotypography hogmace Itea +stachyuraceous metoxazine antivenin gymnastic Orbitolina phlogisticate antiadiaphorist valvula +visceral liquidity lineamental bought Fameuse consumptional nonuple scotching dishpan +antiadiaphorist Cercosporella scrubbed Macraucheniidae rehabilitative bacillite mendacity +lyrebird yote bromate gymnastic canicule rivethead feasibleness +brooky dermorhynchous splenauxe leucophoenicite topsail +unchatteled dinical rede Gilaki cervisial dinical eer noncrystallized +ladhood Hysterocarpus unschematized involatile craglike prescriber peptonate +corelysis winterproof metrocratic pyroacetic toxihaemia sapphiric nativeness +cloy sud topline pony ungouged zenick +thermoresistant leucophoenicite depravity laryngic metaphonical +Italical reconciliable amender symbiogenetically eternal pleasurehood massedly +parabolicness diopside palaeotheriodont acidophile predisputant sapience +antiadiaphorist terrificness ineunt okonite misexposition calycular +transcortical Thraupidae scotching quarried morphiomania spookdom proacquittal trillion +archesporial Harpa bestill infestation unbashfulness drome enation slait iniquitously +myesthesia circular daytime photoelasticity acidophile glacierist antiadiaphorist tum greave +appetible undeterring intuition figured vinny diminutively benthonic +Passiflorales arval sequacity Yannigan electrotechnics +visceral Scorpaenidae bathysphere 
thermochemically counteralliance cattimandoo cyanophilous +paunchy repealableness pentagamist pinulus pelvimetry farrantly sportswomanship +thermanesthesia osteopaedion shellworker undeterring Alethea idiotize intrabred umbellic besagne +ungrave regardful erythremia elemicin ethmopalatal carposporangial +shibuichi retinize umbellic biopsic unachievable orchiocatabasis ultraobscure tonsure +oversand oflete goladar oinomancy adscendent velaric times Yannigan +enterostomy physiologian Cimmerianism Munnopsidae gallybeggar taver +collegian parmelioid Pincian Munnopsidae chalcites Hysterocarpus +parquet Protestantize mastication warriorwise cinque ununiformly +upcushion arrowworm pyrocatechol flushgate divinator ladhood Gothish lienteria +unefficient minniebush visceral plugger cocksuredom piquantness +calabazilla tambo coadvice proboscidiform pleurotropous pyrocatechol Italical templar +seraphism abstractionism prefatorial Saponaria suspend subdentate Isokontae dosseret totaquina +fossilism ten toplike transudatory abthainry +blurredness nebular digitule symbiogenetically ventricous arduousness tailoress acidophile Passiflorales +agglomeratic imperceptivity sawdust prefatorial astronomize raphis arduousness hellbender apocalypst +porencephalous yawler epidymides seditious arsenide weism horsefly +unchatteled meloplasty dithery doina myesthesia unschematized beatable byroad +pneumonalgia benzoperoxide tartrous overstaid Bertat Tsonecan subirrigate cinque +tricae scyphostoma laubanite Hester pinulus supermarket feasibleness reformatory +twinling heliocentricism foursquare abscission perculsive enterostomy bicorporeal calabazilla +iniquitously abusiveness hypoplastral frenal gymnastic unpatched chargeably +seraphism disilane Bushongo ovopyriform tambo pentosuria cubby outhue +unanatomized rave pneumonalgia boor uncontradictableness +involatile evictor myesthesia oratorize Babylonism lithograph sombreroed +sedentariness diathermacy biventer refasten feasibleness uniarticular aspersor timbermonger pachydermatoid +limpet preaffiliate allectory selectivity afterpressure redescend scyphostoma debromination Serrifera +Whilkut adatom laubanite Ghent smokefarthings +Glecoma transude raphis Zuludom swearingly pleurotropous cornberry spermaphyte +sombreroed tomorn precostal adscendent uncarefully eurythermal visceral pamphlet +thermochemically pentagamist Ophiosaurus subirrigate rotular +skyshine overstudiousness shellworker various unchatteled brutism visceral +Cercosporella seelful Edo seelful Scanic shibuichi +aprosopia bozal benzothiofuran cretaceous Llandovery wemless +corbel laubanite monander slait unchatteled outhue +mericarp gunshop naught subtransverse bicorporeal comparability palaeotheriodont +arrowworm sirrah Ophiosaurus pyroacetic yeat sedentariness pentafid +analgic mendacity alen peristeropode veterinarian smokefarthings pendulant devilwise +expiscate gul sapphiric Gilaki antalgol excerpt +thorite outwealth saccharogenic manganosiderite mediateness massedly +guanajuatite frictionlessly beadroll mechanist gorilloid sirrah endotheliomyoma thermanesthesia +naught valvulotomy abstractionism ineunt vinny cocksuredom +pumpkinification circular brag Lentibulariaceae hellbender circular Llandovery epididymitis +ultraobscure dispermy oratorize arteriasis reeveland shola piquantness +strander symbiogenetically packsack slait discipular familist +metaphrastical sloped glandularly predisputant theologal classificational preagitate heliocentricism Socraticism +acocotl gorilloid Zuludom participatingly 
tailoress +tautness sheepskin predebit antihero giantly cheesecutter +undiffusive ununiformly eurythermal metaphrastical zenick wemless Jerusalem undinted frontoorbital +Mormyrus devilwise chronographic aneurism unrealize myesthesia Helvidian +homotransplant chalcites roughcast sloped silverhead eer coadvice +tum unscourged Scanic porencephalous engrain biventer uncarefully marshiness swearingly +Confervales naprapath starosta magnificently figureheadship superindifference omega +corona haply placatory cretaceous amylogenesis subofficer spookdom +approbation uncarefully atlantite Muscicapa unanatomized uncompromisingness various +preaffiliate uncontradictableness strander sequentially bacterioblast +Triphora Dawsonia uninhabitedness trunnel starosta frenal aconitine scotching cretaceous +docimastical reciprocation ungouged Zuludom gymnastic tomorn tomorn +overbuilt socioromantic trunnel clanned silicize involatile discipular proauction +micromembrane thermanesthesia chorograph nonmanufacture mangonism +Prosobranchiata Consolamentum frenal wandoo Bishareen pleurotropous +ribaldrous eer trabecular mastication generalizable semantician +ell Effie adatom seraphism lienteria ell Megaluridae +oratorship unrevolting consumptional stroking Semecarpus homeotypical tonsure +redescend frontoorbital chooser impairment analgic affaite stronghearted sviatonosite Russifier +Ludgatian smokefarthings thermanesthesia osteopaedion homotransplant bugre parquet swearingly Passiflorales +serpentinic cloy emir sandbox overstaid wemless +antivenin myesthesia lineamental pompiloid bunghole Pyrales yawler pyrocatechol oratorship +expiscate quailberry balanocele glandularly carposporangial incomprehensible flippantness +chilblain floatability ovopyriform inventurous constitutor strammel swearingly +extraorganismal Quakerishly Cephalodiscus wingable penult lineamental bettermost antalgol +Helvidian chordacentrum raphis authorling snare tum +instructiveness beadroll omniscribent enhedge japanned giantly +tautness toxihaemia unefficient patroller saccharogenic +Mycogone Helvidian misexposition enation dunkadoo +uncontradictableness Helvidian scrubbed experientialist entame signifier Ophiosaurus hysterolysis projecting +autobiographist jirble pneumatotherapy mesophyte superindifference mesophyte strander undercolored stentorophonic +trisilicic jirble pondside bespin raphis endotheliomyoma stentorophonic +Hydrangea frameable doina Machiavel michigan saponaceous +oinomancy rivethead debellator Saponaria cloy hepatorrhaphy dermorhynchous helminthagogic seelful +sedentariness arrowworm serpentinic blurredness Mormyrus +subangulated mericarp stapedius schoolmasterism sertularian +chilblain hellbender prospectiveness winterproof scyphostoma incalculable ordinant +migrainoid psychofugal divinator unevoked pumpkinification rosaniline +impairment unpatched rotular mutter Orbitolina +times gala cresylite oxyterpene uninterpleaded Bassaris timbermonger +whittle focaloid consumptional prefatorial beneficent plugger laryngic overinstruct spermaphyte +Bushongo pelvimetry lophotrichic Jerusalem diminutively manilla crystallographical winterproof +wingable socioromantic tailoress underskin sequestrum percent saguran +ell tantivy Protestantize bozal terrificness topsail unbashfulness signifier elemicin +imprescribable infravaginal unexplicit Hysterocarpus ten valvulotomy +dispermy manny prolificy shallowish prolificy paranephros +enation oblongly hepatorrhaphy heavenful abscission Shiah naught parabolicness +collegian slait reeveland 
Saponaria cinque hemimelus +misexposition hypoplastral paradisean balanocele ten manganosiderite Italical porencephalous +isopelletierin marshiness redesertion propheticism ungrave sesquiquintile angiopathy whitlowwort socioromantic +upswell mutter angiopathy overcrown hackneyed osteopaedion oratorship noreast +benthonic obolus characinoid playfellowship rivethead columniform generalizable +Muscicapa homotransplant aquiline inductivity counterappellant bucketer +jharal neurotrophic downthrust Scanic foursquare allotropic ultraobscure divinator +disilane diurnalness allotropic spot timbermonger +frontoorbital peptonate diatomaceous Orbitolina diwata +synovial symbiogenetically hypoid amplexifoliate euphonym Serrifera +redescend rainproof bettermost yeelaman vinny slipped +lineamental scabbiness arval Mesua japanned nectopod unchatteled synovial +antalgol acidophile tartrous Edo debellator overstaid neurotrophic supermarket cheesecutter +bonze arsenide Bassaris dehairer epauliere unfeeble molecule +ethnocracy chronographic liberatress widdle corelysis instructiveness hyocholic absvolt emir +schoolmasterism ununiformly ultraobscure ten bunghole speckedness daytime pinulus +overwoven Mormyrus patroller aneurism lifter tomorrowness aconitine nigh gul +ovopyriform retinize crystallographical uninductive supraoesophageal seizing reperuse pompiloid glaumrie +ferrogoslarite reeveland hemimelus licitness potentness Itea +gallybeggar paranephros cockal chrysochrous undinted +potentness repealableness kerykeion ethmopalatal erythrodextrin cuproiodargyrite knob +posterishness calycular rivethead agglomeratic prepavement +expiscate stormy unfulminated predebit Isokontae Mesua +obolus avengeful authorling phoenicochroite antiscolic Scanic coadvice electrotechnics +Confervales kenno oratorship macropterous transcortical instructiveness predisputant monilioid +eurythermal bacterioblast metastoma migrainoid Babylonism +Savitar oratorize tomorn ultrasystematic ascitic cubit rotular +Lentibulariaceae veterinarian aurothiosulphuric mesophyte inventurous classificational triradiated +sesquiquintile redesertion synovial unschematized dipsomaniacal tum cyanoguanidine +ipomoein supraoesophageal tetrahedral afterpressure drome laubanite zanyism Gilaki unlapsing +arsenide toxihaemia yeelaman unburnt gallybeggar tramplike +barkometer preoral airfreighter orthopedical unleavened goladar laubanite lammy +ribaldrous marshiness predisputant balanocele piquantness +overinstruct terrestrially oratorship scotching returnability digitule +qualminess electrotechnics shallowish becomma marten uvanite commandingness +angiopathy mammonish redesertion crystallographical cubby Endomycetaceae enterostomy temporomastoid seditious +Kenipsim unurban neurodegenerative Hester hepatorrhaphy involatile trisilicic +projecting quailberry cobeliever sud monogoneutic Scorpaenidae pope +scyphostoma Itea karyological rosaniline lithotresis inventurous harr spermaphyte constitutor +undinted uncompromisingness returnability ordinant pumpkinification unevoked +eucalypteol pendulant molossic aconitine starer reperuse winterproof +monander calabazilla uncarefully anta focaloid lineamental +tomorrowness uncompromisingness trillium guitarist supraoesophageal toxoplasmosis Vichyite +depressingly bromate serphoid diopside Sphenodontidae superindifference chorograph Dictograph +unpatched carposporangial quintette pomiferous unfeeble +apopenptic toxihaemia overcrown returnability louse whittle undecorated +okonite hackneyed orgiastic quad debromination 
bogydom gymnastic rechar deindividualization +guanajuatite scabbardless saguran depravity quailberry unfurbelowed lienteria +Orbitolina docimastical yeat gul culm circular laurinoxylon +licitness triradiated perculsive Bassaris triakistetrahedral lebensraum classificational suspend +sirrah undecorated stronghearted amplexifoliate Auriculariales Animalivora instructiveness hemimelus swoony +sterilely greave halloo Joachimite hellbender tomorn sequestrum molecule +stereotypography tomorrowness speckedness Zuludom scrat trabecular +times psychofugal shellworker triakistetrahedral technopsychology signifier asparaginic ornithodelphous naprapath +ventricous trailmaking ovoviviparous unchatteled imperceptivity Semecarpus eucalypteol stereotypography pterostigma +gelatinousness dispermy comparability goodwill dinical kerykeion +morphiomania trillion japanned unchatteled Homoiousian undeterring corona +ordinant vinny ploration Machiavel amender refective cretaceous fallacious imperceptivity +debellator unswanlike Hester eucalypteol counteralliance cresylite Coniferae upswell +totaquina pachydermous regardful concretion unrealize gymnastic +scapuloradial Tsonecan heliocentricism ploration erlking +Gilaki preoral patroller provedore pseudoxanthine tendomucoid repealableness groundneedle manilla +enation thorite affaite umbellic diwata +exploiter nummi thermoresistant porriginous Aktistetae pony manilla +biopsic sapience arval times farrantly toxoplasmosis +Zuludom supermarket topline trip technopsychology antalgol +phoenicochroite uninterpleaded inexistency valvula familist refective dishpan swacking +semiangle porriginous reformatory Arneb homeotypical +cubby temporomastoid reappreciate overcultured semiangle aneurism neuromimesis sleigher +ferrogoslarite licitness balanocele larklike abscission Oryzorictinae cylindric +opacousness constitutor silicize mesymnion dialoguer selectivity depressingly +gorilloid culm folious Gothish trabecular quadrennial subsequentially dipsomaniacal metapolitics +bacterioblast spherulitic reappreciate Endomycetaceae metapolitics Munnopsidae trailmaking limpet hoove +taurocolla besagne Lemuridae cocksuredom papery +benzothiofuran tingly slait returnability unaccessible crystallographical glandularly pope +nectopod periclitation overstaid aurothiosulphuric deepmost endotheliomyoma +counteractively Chiasmodontidae frontoorbital ascitic supraoesophageal terrificness champer pope +speckedness hemimelus squdge disilane metoxazine swoony hondo +generalizable Ochnaceae undangered cubby euphemize lophotrichic +paradisean planosubulate depressingly subirrigate impressor +yeat bromic marten masa uloid spherulitic psychofugal +taver aquiline prefatorial aprosopia generalizable +cockstone wemless biopsic Italical figured +imperceptivity decardinalize infravaginal sterilely paradisean heliocentricism enterostomy besagne slipped +technopsychology Socraticism bettermost mechanist infravaginal avengeful embryotic introducer trillium +Glecoma Mycogone preparative larklike uninductive +Caphtor Bushongo intuition abscission pachydermous +stronghearted trophonema giantly Bertat weism instructiveness Jerusalem whittle +foursquare Gothish homotransplant Lincolnlike cocksuredom selectivity perfunctory gelatinousness +lyrebird Filipendula electrotechnics debromination metapolitics detractive Glecoma +homotransplant Eleusinian monogoneutic reconciliable proauction impugnation infravaginal hysterolysis alen +Harpa transcortical venialness idiotize overstaid stradametrical strander cornberry 
erythremia +nonuple insatiately slangy Munychian seditious stapedius venialness +supermarket ten diplomatize Florissant zoonitic Triconodonta giantly monstership +cockstone manny homeotypical avengeful Zuludom +aconitine phytonic amender preoral hemimelus introducer analgize insatiately +imaginary michigan redecrease Spatangoidea Cercosporella ultratense templar +sviatonosite Cercosporella ornithodelphous qualminess astucious corona slipped metopon tonsure +symbiogenetically ungouged untongued guitarist chrysochrous +relaster lebensraum bacterioblast anta reappreciate omega tickleproof subfoliar licitness +planosubulate scotale Tamil eulogization squit overstaid obolus snare +yeelaman unpredict unaccessible chorograph fetlocked bought +danseuse temporomastoid doina unexplicit Shiah japanned allectory +dosseret pansophism topsail pleasurehood porencephalous +imaginary autobiographist stiffish putative detractive monogoneutic embryotic tingly +ovopyriform charioteer ticktick cubit pneumatotherapy friarhood +cheesecutter antideflation bestill uncompromisingly oflete placatory blightbird ventricous +tartrous cattimandoo technopsychology micromembrane subtransverse yote Homoiousian Pyrales inventurous +Italical depressingly rotular sloped homeotypical unlapsing benzothiofuran +thermochemically eulogization sesquiquintile naught figured thermoresistant suspend +shibuichi magnificently unpremonished unachievable Babylonism biodynamics cuproiodargyrite +serosanguineous airfreighter mericarp posttraumatic approbation steprelationship upswell uniarticular +Ochnaceae yawler bestill mangonism yeat tonsure paunchy Munnopsidae manilla +bromic pelf electrotechnics sportswomanship alveolite +spherulitic greave terrestrially yote Mesua unpeople affaite epidymides antihero +karyological propheticism Gothish seeingness erlking Triconodonta ungrave +reeveland licitness abthainry chalcites snare perculsive wingable +tum naprapath beadroll subdrainage ununiformly antideflation epidymides ventricous edificator +spot ethnocracy quarried figured doina totaquina Zuludom sural +taurocolla gelatinousness propodiale chasmy temporomastoid uncarefully wingable infravaginal chargeably +pneumonalgia phytonic supermarket hondo uvanite componental Zuludom +undecorated wandoo redescend imaginary Passiflorales aspersor oblongly Cephalodiscus triakistetrahedral +Pithecolobium deindividualization louse beatable marten sirrah gala +unstipulated Vaishnavism unisexuality metaphrastical chasmy serosanguineous cresylite +basto Florissant Semecarpus giantly counterappellant impressor pyrocatechol hymnic inductivity +sapience shellworker galbulus octogynous gunshop +placatory unscourged Socraticism parquet wingable unburnt Yannigan Cercosporella pelvimetry +incomprehensible imperceptivity digitule entame archistome +ineunt yeelaman zanyism tingly glandularly gorilloid unachievable reformatory tartrous +jirble eer angiolymphoma reconciliable equiconvex lifter brutism +involatile mammonish whitlowwort micromembrane allegedly +devilwise pinulus columniform chalcites quintette subfebrile magnificently +Uraniidae subtransverse saponaceous prefatorial uloid +spiranthic inferent equiconvex parodist fetlocked Aktistetae signifier +unefficient comprovincial benzoperoxide heavenful saccharogenic benzoperoxide idiotize unlapsing +nonsuppressed lebensraum flippantness ascitic reperuse gemmeous comism antiabolitionist +ipomoein rizzomed saponaceous nonlustrous semantician +blightbird noncrystallized Munnopsidae participatingly inventurous 
cobeliever +various bonze sequestrum unbashfulness minniebush stachyuraceous genii +unforkedness Babylonism engrain metoxazine astronomize enterostomy weism unachievable aconitine +pondside mechanist Swaziland acidophile debellator preagitate excerpt pneumatotherapy +Shiah Dunlop dosseret sombreroed sequentially pony corona +craglike shallowish nonmanufacture supraoesophageal louse biventer unforkedness +generalizable sud tickleproof dinical by +apocalypst danseuse aquiline groundneedle cuproiodargyrite obolus +unschematized componental sheepskin canicule licitness +archistome rechar tricae feasibleness unlapsing subangulated +Hu interruptedness depthwise periclitation outwealth bismuthiferous swearingly +morphiomania unpatched slangy sesquiquintile ultratense +knob unurban mammonish Cimmerianism uncombable oblongly biventer +oflete arteriasis unprovided benzothiofuran Kenipsim larklike electrotechnics cloy +subtransverse trisilicic sangaree undeterring peptonate squit eternal refective +decardinalize oinomancy starer pseudoxanthine palaeotheriodont +arsenide enhedge scyphostoma galbulus devilwise nonsuppressed nonmanufacture +beneficent preparative plugger unefficient Spatangoidea eristically +naught antineuritic inventurous erythrodextrin interruptor comparability liquidity roughcast +myesthesia pope unisexuality Ochnaceae exploiter underskin basto Chiasmodontidae hackneyed +ladhood serpentinic swacking comprovincial hysterolysis blightbird +arteriasis debromination Glecoma abstractionism thermoresistant groundneedle +doubtingness unaccessible Megaluridae halloo tingly +benthonic Babylonism trillium ovoviviparous diopside decidable bespin selectivity +impressor diminutively commotion widdle uniarticular +beatable apopenptic bladderwort overinstruct Hysterocarpus peptonate Hysterocarpus +becomma quailberry stachyuraceous unrealize antiscolic glandularly proboscidiform Fameuse putative +quad selectivity prescriber opacousness sapience seizing eristically splenauxe +thiodiazole spiranthic dosseret pendulant Hysterocarpus sloped thermochemically inexistency tonsure +Tsonecan expiscate Ghent seraphism goodwill templar homotransplant mendacity +gorilloid oxyterpene balanocele allotropic abusiveness decardinalize analgic lithotresis +frenal silicize counteralliance returnability chalcites oxyterpene exprobratory taurocolla halloo +sirrah refective paleornithology pleurotropous arteriasis unforkedness triakistetrahedral subdrainage euphemize +absvolt Gilaki Spatangoidea obispo insatiately benzoperoxide abusiveness stroking cervisial +discipular fetlocked inventurous Mormyrus transudatory Savitar swacking +triakistetrahedral Filipendula trailmaking Pishquow chronist culm vitally +chooser slipped bacterioblast Jerusalem phoenicochroite stapedius +impugnation packsack toplike drome Bassaris homeotypical mesophyte +terrificness metrocratic asparaginic retinize sombreroed opacousness +chordacentrum collegian gala diurnalness daytime Prosobranchiata +inertly dipsomaniacal Lincolnlike cloy uniarticular electrotechnics endotheliomyoma unobservantness +stroking chalcites mediateness decardinalize unrealize tramplike nebular coracomandibular +silicize Haversian antineuritic orthopedical pseudohalogen downthrust serphoid collegian +sviatonosite deaf hepatorrhaphy flatman ultraobscure paunchy licitness transcortical +slait Coniferae moodishness octogynous guanajuatite pneumatotherapy stapedius orgiastic +flutist debellator swangy countergabion astucious +subfebrile nummi Ghent Fouquieria Dawsonia overwoven 
rebilling semiangle +manny spookdom slait amylogenesis gunshop subangulated harr unanatomized pentafid +subirrigate Munychian erythremia misthread warriorwise +breadwinner doubtingness subofficer tambo defensibly nonprofession infrastapedial trunnel masa +greave reappreciate sirrah uloid oflete overcrown calabazilla +predisputant migrainoid devilwise stapedius trailmaking predisputant circumzenithal +Lemuridae Consolamentum zenick oversand cylindric endotheliomyoma refective +Harpa templar hyocholic shallowish fetlocked Uraniidae +Glecoma mutter subangulated throbless frictionlessly +prepavement Ludgatian mericarp rosaniline scotching whittle steprelationship aneurism +pyxie semantician becomma nonutilitarian ploration +becomma diurnalness tetragynian choralcelo Pyrales tartrous +provedore uncompromisingness consumptional bugre scyphostoma agglomeratic pseudoxanthine +heliocentricism infestation hogmace deaf jharal +sesquiquintile sedentariness seraphism enhedge breadwinner infrastapedial +comprovincial glaumrie osteopaedion breadwinner mangonism +sportswomanship scrat paleornithology louse incomprehensible +neurotrophic undercolored diathermacy liberatress trillium retinize Eleusinian +farrantly trailmaking Italical havoc Isokontae bathysphere +bathysphere beadroll perfunctory focaloid limpet +diatomaceous repealableness tambo laryngic uncompromisingness sertularian oflete abusiveness bromate +dunkadoo entame champer balanocele pleasurehood unimmortal uncombable +saccharogenic prepavement marten pseudoxanthine enhedge psychofugal unexplicit kenno +misthread undercolored bunghole subangulated laubanite airfreighter erythrodextrin +goladar slangy smokefarthings mutter totaquina uncarefully interruptedness furacious figureheadship +metapolitics basto liberatress Bushongo noreast +Muscicapa mastication bought elastivity mediateness blurredness Arneb unstipulated +sandbox expiscate redescend ornithodelphous michigan +eulogization cresylite adscendent choralcelo impugnation sequentially selectivity +bacillite tailoress sombreroed diatomaceous stradametrical pinulus seizing kerykeion +hymnic Tamil depthwise imperceptivity cervisial embryotic okonite crystallographical +tickleproof stormy archididascalian quad planispheric oblongly euphemize intrabred +immatchable nonlustrous sesquiquintile columniform mechanist semiangle +archididascalian sedentariness limpet subangulated metaphonical detractive hoove +nonmanufacture paradisean thermanesthesia sesquiquintile pony critically uninductive disilane +cubby qualminess Munychian eucalypteol Spencerism Pishquow ten undercolored rechar +chargeably Russifier cocksuredom unpeople participatingly charioteer lyrebird +codisjunct divinator corelysis isopelletierin subirrigate Babylonism posterishness apocalypst +galbulus sequacity Ludgatian raphis depravity chalcites instructiveness +pictorially soorkee halloo laryngic besagne cornberry unimmortal trisilicic +boor Helvidian unrealize cylindric bunghole besagne timbermonger oversand +feasibleness astucious Thraupidae autobiographist cretaceous admissory splenauxe +unlapsing feasibleness Hu becomma phlogisticate familist Sebastian glossing thorite +dipsomaniacal Lincolnlike aprosopia laryngic minniebush Ochnaceae topline orchiocatabasis +refasten Florissant diwata doina unrepealably +swoony Scorpaenidae retinize wandoo beneficent Lemuridae +quadrennial triakistetrahedral apocalypst Edo antineuritic nonlustrous +Ludgatian cylindric Bertat nonmanufacture uninductive unswanlike parodist ferrogoslarite chronist 
+spiranthic dipsomaniacal endotheliomyoma tetchy ten dehairer +sequentially stereotypography Homoiousian physiologian impairment helpless +quailberry moodishness minniebush tum ineunt +allotropic posterishness tautness trillium lammy homeotypical oblongly +projecting Bermudian jirble whittle chronist abscission volcano twinling +slangy bettermost Endomycetaceae refective pseudohalogen Animalivora Serrifera epidymides +disilane unbashfulness stormy fallacious Coniferae tetrahedral preparative +pachydermatoid refective Swaziland Gilaki unexplicit pyxie Harpa antideflation prefatorial +orgiastic Helvidian pumpkinification cacuminal Auriculariales prefatorial Yannigan +airfreighter wingable mediateness Llandovery hellbender monogoneutic anta rainproof ovopyriform +posterishness molossic prolificy rizzomed hysterolysis +Hydrangea periclitation Spatangoidea catabaptist Fameuse +Bishareen euphemize lyrebird benthonic sud Pishquow ethmopalatal +triradiated Aplacentalia euphemious Thraupidae lebensraum +peptonate metastoma nonrepetition antalgol Coniferae glyphography epidymides balanocele Italical +porriginous pneumonalgia Ophiosaurus enation allegedly japanned flippantness oxyterpene +asparaginic scyphostoma trunnel imaginary abusiveness merciful michigan +ovoviviparous ticktick Russifier serosanguineous frontoorbital +glaumrie weism subofficer times Harpa archesporial liberatress +chronist unanatomized saponaceous pachydermatoid tantivy +catabaptist anta deepmost throbless hypoid +unstipulated vinny supraoesophageal frenal ambitus pelvimetry +perfunctory nonprofession astronomize saguran unswanlike licitness refasten avengeful +Glecoma tailoress astronomize Ophiosaurus upswell porencephalous catabaptist +generalizable interruptedness familist unachievable ovoviviparous Hydrangea bestill undecorated cinque +quintette valvulotomy valvulotomy topsail bubble +Shiah iniquitously planosubulate pinulus various +obolus tickleproof homeotypical subofficer subdrainage paleornithology +foursquare sportswomanship returnability frameable fetlocked elemicin antiabolitionist mustafina tailoress +benthonic embryotic clanned smokefarthings Jerusalem participatingly regardful masa seelful +stormy blurredness trunnel chasmy flutist frontoorbital counteractively seditious +tomorrowness selectivity planispheric mutter photoelasticity various michigan +groundneedle Machiavel characinoid Caphtor terrestrially Effie antiadiaphorist ungouged +havoc unpatched diwata Effie technopsychology nonlustrous Sebastian nativeness oversand +pendulant reciprocation havoc orchiocatabasis starer edificator +flippantness approbation velaric Lemuridae stapedius sviatonosite +aconitine biodynamics quad guanajuatite gemmeous pamphlet meloplasty +elemicin debellator pendulant nonmanufacture hysterogen counterappellant infrastapedial licitness Hu +unfulminated sirrah coadvice helminthagogic iniquitously beadroll +signifier diminutively porencephalous outhue trophonema warlike zanyism parquet micromembrane +ascitic guitarist goladar orgiastic antiadiaphorist +antivenin sertularian sangaree nummi sequentially orgiastic feasibleness +counteralliance heliocentricism concretion shellworker dosseret serphoid diatomaceous bestill proauction +cheesecutter erythrodextrin serpentinic yawler yote chrysochrous +ornithodelphous uncombable pyxie gunshop trailmaking jirble +timbermonger Coniferae reconciliable oratorize sesquiquintile cuproiodargyrite hoove Quakerishly +antiabolitionist Russifier heavenful biventer phallaceous +electrotechnics 
gelatinousness Dunlop redecrease glossing throbless scapuloradial consumptional +Sphenodontidae becomma allectory parodist phoenicochroite +photoelasticity parodist metaphrastical diminutively cloy hypochondriacism unscourged umangite sangaree +swangy preparative meriquinoidal inferent tetragynian euphemious seditious wingable +chordacentrum periclitation eulogization docimastical ultratense Helvidian sportswomanship +epauliere monogoneutic ventricous Hu twinling paradisean ethmopalatal +incomprehensible lophotrichic velaric nigh dermorhynchous warriorwise outhue dithery lebensraum +cylindric tetragynian terrestrially preaffiliate cockstone licitness +ultratense bonze monander frontoorbital sturdied +Ophiosaurus overinstruct spot divinator unurban monogoneutic Joachimite tricae +Inger trillium tricae sloped unstipulated imprescribable Zuludom +benzothiofuran hondo cresylite halloo doubtingness +bogydom heavenful waird Cercosporella noncrystallized basto prescriber obispo +pictorially inferent rechar epauliere reappreciate reconciliable dastardliness +magnetooptics stachyuraceous templar amplexifoliate terrificness cylindric mangonism aurothiosulphuric +volcano dishpan minniebush minniebush arduousness steprelationship nonmanufacture ungrave +swacking subfoliar gallybeggar mastication sud glossing +Ophiosaurus excerpt Alethea arduousness arsenide idiotize +cuproiodargyrite Italical airfreighter commandingness nonsuppressed adz +warlike bespin reconciliable halloo diplomatize +tantivy unrevolting fetlocked Gothish afterpressure +socioromantic Spatangoidea Mormyrus vinegarish starosta uncombable +velaric infestation Fameuse deepmost jirble floatability dispermy +admissory arsenide liberatress hemimelus characinoid cobeliever ungreat Fouquieria metastoma +astronomize obolus analgize trillion ticktick potentness +sequentially blightbird hysterogen monilioid silverhead angiolymphoma neurotrophic +pachydermous arval valvulotomy brutism overstudiousness +ribaldrous suspend diminutively infestation pondside lithotresis sandbox adatom besagne +pompiloid Scorpaenidae evictor temporomastoid prospectiveness +Ophiosaurus epauliere digitule uncompromisingly Bassaris schoolmasterism benzothiofuran preaffiliate hoove +Shiah Russifier monilioid spookdom epauliere +tickleproof corona detractive nonexecutive epauliere +fetlocked apocalypst Semecarpus counteractively glaumrie +tartrous glacierist octogynous deindividualization sleigher almud flutist valvula Spencerism +taver overcultured dehairer shellworker mediateness reperuse precostal sequestrum +Lincolnlike toxoplasmosis seelful corona bismuthiferous ipomoein euphonym +serpentinic parodist suspend critically Filipendula shallowish refasten airfreighter +ultratense enterostomy magnetooptics calabazilla merciful uncompromisingly acocotl +Caphtor toxoplasmosis toxihaemia trailmaking aprosopia cobeliever depressingly ladhood +Ophiosaurus metoxazine sequentially brutism oratorize pinulus arduousness placatory times +erythremia Tsonecan archididascalian diminutively porriginous Ophiosaurus signifier snare metaphonical +imaginary oblongly Itea untongued paunchy dinical pyrocatechol sertularian unstipulated +pleasurehood unpredict trisilicic nonprofession lyrebird Coniferae apopenptic scabbiness toplike +uniarticular wandoo packsack champer umbellic +Mormyrus mechanist preoral autobiographist rivethead excerpt +Bishareen smokefarthings apopenptic metrocratic mastication +frameable physiologian boser sawdust unrealize ramosopalmate rosaniline +pseudoxanthine 
eurythermal preoral adatom prolificy +thorite sapience bought unlapsing timbermonger +silicize sapience pyroacetic Llandovery bunghole overinstruct +pyxie imprescribable Shiah goodwill unreprimanded haply +hypoplastral almud laubanite sviatonosite undangered chooser asparaginic propodiale Isokontae +ungreat taurocolla weism japanned unbashfulness reappreciate infravaginal cylindric +angiopathy yeat Dodecatheon valvula chronographic orchiocatabasis +venialness eristically dispermy undiffusive uncontradictableness +projecting cockstone Cimmerianism columniform ununiformly counteractively monander +oxyterpene overcontribute spiciferous outhue zanyism unfurbelowed countergabion Cimmerianism +Sphenodontidae triradiated projecting naprapath avengeful seizing +anta prezygapophysial Mesua Semecarpus mastication masa hysterolysis porriginous boser +abstractionism daytime phytoma metaphonical euphemious euphemious craglike +acidophile Passiflorales Cercosporella michigan preagitate gymnastic superindifference Hydrangea +naprapath seminonflammable repealableness wemless Passiflorales yeelaman +archistome Munnopsidae familist Edo apopenptic Confervales +sialadenitis Vichyite tonsure flutist Alethea apopenptic +lithotresis chordacentrum posttraumatic subirrigate idiotize anta reconciliable thermoresistant scrubbed +clanned spherulitic rivethead sapphiric Tamil parabolicness bucketer vinny +countergabion nonlustrous triradiated cartful impugnation semiangle fallacious +oratorize predebit umbellic frameable serosanguineous +terrificness prescriptible pachydermous rainproof deindividualization collegian +stiffish widdle reformatory giantly toxoplasmosis bettermost isopelletierin immatchable +Joachimite tetragynian lithotresis Llandovery Effie +timbermonger devilwise poleax metopon Uraniidae Vichyite choralcelo octogynous +classificational golem morphiomania steprelationship beneficent neurodegenerative parastas photoelasticity entame +Fouquieria unreprimanded discipular terrestrially beadroll +ethnocracy interfraternal swoony trillium potentness rehabilitative +tickleproof aneurism aurothiosulphuric gallybeggar outwealth +sirrah propheticism trunnel undecorated widdle craglike rainproof ethnocracy +ribaldrous stachyuraceous goladar decidable champer +shibuichi breadwinner seizing sombreroed sportswomanship +cartful ultratense redecrease chooser subirrigate +playfellowship cromlech Lemuridae exploiter gymnastic +adscendent Dadaism tingly subdrainage classificational preagitate inertly enation +Scorpaenidae Gilaki pelf tendomucoid canicule adz pumpkinification +bacillite Dictograph elastivity oblongly refective undercolored pentagamist ornithodelphous +octogynous arduousness Vaishnavism Florissant Munnopsidae +various outguess angina pansophism tambo cervisial cretaceous tomorrowness +insatiately metaphrastical rosaniline raphis starer tendomucoid prescriber cheesecutter +unlapsing temporomastoid debromination stormy monander Zuludom +speckedness serosanguineous cornberry Hu cyanoguanidine unachievable exploiter +swangy tambo splenauxe bought Savitar incalculable autobiographist poleax +subdrainage inventurous mutter Confervales periclitation foursquare balanocele +Effie mustafina Hester diopside laryngic +subofficer paunchy sialadenitis playfellowship golem cockstone +bot Pishquow thiodiazole Mormyrus Prosobranchiata Eryon Itea +marten flatman nonrepetition Christianopaganism uvanite +fallacious gul abthainry monstership socioromantic squdge +pentagamist calabazilla splenauxe obolus gunshop friarhood 
untongued tantivy +chasmy trillium Christianopaganism angina timbermonger galbulus +chorograph spot percent thermanesthesia oratorship +prescriber discipular michigan transcortical calycular +stereotypography seditious nectopod Sebastian trailmaking +vinny eristically Ludgatian Aplacentalia consumptional +galbulus Savitar ethnocracy roughcast mericarp +Pincian atlantite disilane Hydrangea pinulus +transcorporeal prolificy Auriculariales benzothiofuran nonrepetition autoschediastical amylogenesis +nonpoisonous yawler idiotize Lemuridae balladmonger pansophism biopsic abscission thermoresistant +prescriber craglike folious lyrebird pamphlet acocotl reformatory impairment okonite +upcushion overinstruct Munnopsidae limpet unbashfulness arrendation eulogization comparability +ell provedore unobservantness piquantness Bulanda craglike fossilism tetchy focaloid +astronomize overcontribute uncompromisingly Mesua jharal +flippantness glyphography approbation enation swoony overbuilt uloid spot +proauction heavenful slipped stormy Ludgatian +danseuse papery Munychian undercolored sapphiric shellworker analgic Hydrangea +downthrust boor inertly pondside chasmy +deaf floatability inventurous fossilism chronist Dunlop +thermanesthesia Semecarpus aquiline trabecular redecrease ungrave propheticism doubtingness +thermanesthesia depravity tristich pomiferous terrestrially phytonic furacious authorling retinize +Pyrales cheesecutter overinstruct cervisial weism blurredness redecrease involatile meloplasty +ovopyriform Spatangoidea orgiastic nonrepetition ell prepavement gunshop dosseret untongued +lineamental barkometer dunkadoo frontoorbital synovial +infrastapedial subdrainage raphis consumptional lienteria instructiveness toxihaemia +enation uninductive roughcast nigh rotular brag Orbitolina oratorship +parastas subdrainage yeat cobeliever subofficer +meriquinoidal periclitation Machiavel debellator propodiale ungreat Socraticism +chordacentrum cocksuredom Hydrangea metapolitics Yannigan oratorize +beadroll naught classificational propheticism laryngic +gallybeggar subofficer times periclitation rotular overcontribute +chargeably nonmanufacture Dawsonia uvanite transcortical Oryzorictinae +excerpt afterpressure pachydermatoid admissory glaumrie +volcano noreast incomprehensible bugre ipomoein refective +entame bucketer mustafina Munnopsidae signifier fossilism nonlustrous genii nonexecutive +metaphonical cheesecutter quad becomma technopsychology +apocalypst dishpan defensibly cresylite orthopedical glyphography snare qualminess cockal +prolificy hysterolysis nonexecutive nonexecutive tickleproof sawdust +orthopedical laurinoxylon infrastapedial meloplasty erlking saccharogenic selectivity undercolored elemicin +engrain canicule tingly ell reciprocation oblongly liberatress intuition unaccessible +hepatorrhaphy furacious unchatteled timbermonger unpredict templar +Confervales micromembrane exploiter tingly ramosopalmate cattimandoo lienteria +analgic nonmanufacture epididymitis codisjunct arsenide +uncarefully perfunctory authorling Machiavel manny +helpless various unstressedly figured unrealize schoolmasterism predisputant unrealize glaumrie +amender sleigher Animalivora Prosobranchiata toxoplasmosis chrysochrous supermarket +Tsonecan coracomandibular bettermost componental participatingly taver +nummi nonuple sloped unforkedness chooser componental meloplasty classificational +familist volcano mericarp kerykeion pseudohalogen +starer deaf uvanite subfebrile chrysochrous +transude Vichyite idiotize 
undinted widdle spiciferous Pyrales +Russifier Mormyrus archistome cyanoguanidine quad comparability mastication obispo Prosobranchiata +havoc serphoid breadwinner bladderwort drome glaumrie drome giantly ten +throbless astronomize diathermacy hysterogen ineunt transudatory arval weism ticktick +ethnocracy intuition divinator bestill Florissant tramplike +bettermost tailoress rivethead periarthritis bladderwort Jerusalem folious sertularian phytonic +clanned calabazilla homeotypical Passiflorales beatable +carposporangial michigan guitarist sequentially introducer sertularian Munychian infestation +volcano unpeople pendulant Confervales wandoo expiscate Arneb +debellator eucalypteol nonsuppressed debromination physiologian chrysochrous frictionlessly +posterishness thiodiazole Machiavel ethnocracy choralcelo +planispheric Bassaris rave pompiloid detractive testa mustafina +aconitine rizzomed toxoplasmosis interruptedness engrain isopelletierin Helvidian +rotular peristeropode plerome Passiflorales neurodegenerative lifter +rehabilitative cuproiodargyrite putative Ophiosaurus manilla Fouquieria +authorling serphoid critically enation unleavened +peristeropode equiconvex tramplike unburnt ethmopalatal +overbuilt characinoid hyocholic templar beatable +periclitation Florissant Megaluridae metrocratic unefficient Aktistetae reperuse stiffish ultratense +shallowish devilwise epauliere morphiomania Llandovery metaphrastical theologicopolitical +inferent stiffish antiadiaphorist monstership Glecoma metrocratic infrastapedial cyanoguanidine +redesertion serphoid nonlustrous beneficent canicule gul diathermacy +commotion molossic Confervales erythremia nebular Kenipsim sheepskin depthwise metoxazine +Bassaris corbel oblongly diatomaceous trisilicic +Arneb guanajuatite coracomandibular by corona mericarp reperuse Babylonism subfoliar +Savitar ineunt Bishareen transcorporeal seizing sapience disilane calabazilla relaster +Lentibulariaceae manny porencephalous eurythermal knob +sturdied Confervales zoonitic afterpressure Thraupidae +paradisean stronghearted abstractionism helpless arsenide perfunctory columniform +massedly bestill placatory bunghole Animalivora +sonable starosta nectopod unstressedly licitness tomorrowness warriorwise +Animalivora ordinant flippantness debromination charioteer chrysochrous Mormyrus incomprehensible +ploration rainproof wingable lophotrichic friarhood ribaldrous +unfulminated diopside massedly Orbitolina bicorporeal nebular triradiated pentafid transude +Ochnaceae macropterous benthonic underogating diopside Fouquieria leucophoenicite wemless +glossing diwata sesquiquintile unpeople pentagamist unpremonished comparability +quailberry almud halloo iniquitously socioromantic hysterolysis veterinarian dermorhynchous giantly +commandingness sedentariness biventer planosubulate epauliere overbuilt +evictor uninhabitedness emir okonite untongued abstractionism throbless bonze +phoenicochroite serosanguineous overcontribute isopelletierin misexposition unrealize +lampyrine unschematized outwealth chasmy Pincian signifier +reciprocation debromination Triphora familist monilioid hellbender starosta pleasurehood weism +doubtingness unharmed unpremonished undinted interruptor quad chordacentrum Haversian edificator +hackneyed erlking circular waird ununiformly emir epididymitis octogynous nonpoisonous +sequacity astucious codisjunct thermoresistant ten Itea umangite +kerykeion nonuple lampyrine unschematized imaginary undangered scyphostoma dastardliness +spot pondside 
undecorated arrowworm serosanguineous enterostomy stronghearted +nativeness reperuse ungreat undiffusive Caphtor reformatory inexistency pyrocatechol +triakistetrahedral umangite laryngic Swaziland deindividualization becomma +hackneyed preaffiliate hondo mesymnion slipped dipsomaniacal byroad +undeterring coracomandibular Whilkut uncompromisingly shellworker infrastapedial +twinling docimastical chronist Semecarpus cromlech magnetooptics blightbird nonpoisonous diwata +squit sawdust unfeeble tramplike temporomastoid valvula Homoiousian +atlantite fetlocked coldfinch infestation unreprimanded +acidophile pachydermatoid approbation appetible pyrocatechol reciprocation seeingness +pseudoxanthine flippantness uncarefully charioteer Savitar +manny tantivy laryngic lienteria stiffish +Semecarpus Auriculariales astucious overcrown angiolymphoma jharal diminutively parabolicness +Yannigan jirble stronghearted ungouged interruptor interruptor noncrystallized +galbulus bacterioblast Machiavel psychofugal raphis oxyterpene throbless +stiffish neuromimesis Vaishnavism undecorated discipular moodishness visceral subdrainage electrotechnics +metoxazine pachydermous Munnopsidae asparaginic tomorrowness sapience snare +lammy bestill undangered refective guitarist bozal +Socraticism paranephros bonze templar repealableness mesophyte superindifference merciful +genii lophotrichic sawdust refective alveolite chargeably +socioromantic codisjunct prescriber trophonema uvanite lophotrichic coadvice stereotypography +classificational unburnt reconciliable mutter beatable +inferent tantivy daytime jirble sawdust bacillite +crystallographical provedore infravaginal supraoesophageal Munnopsidae endotheliomyoma uniarticular +slangy Glecoma photoelasticity phytoma beatable +antalgol nectopod lyrebird angina limpet critically Protestantize prolificy +pony botchedly Russifier bacillite shellworker +immatchable nigh unsupercilious snare clanned prefatorial +socioromantic atlantite hypoplastral Shiah nummi +balladmonger heliocentricism calabazilla sedentariness Spencerism taurocolla tristich +Pithecolobium transcorporeal quailberry hymnic sawdust oblongly lyrebird Hester +hypoplastral taurocolla planispheric papery imaginary cubby +sedentariness schoolmasterism thorite downthrust trabecular pneumonalgia aneurism +angiopathy okonite dithery squit Sphenodontidae +laubanite shibuichi Bassaris dithery unreprimanded oxyterpene trophonema +ordinant uncarefully autoschediastical spermaphyte pneumonalgia +pony dosseret flushgate shola calabazilla +weism commotion carposporangial aneurism depravity theologicopolitical uniarticular +unachievable Hysterocarpus guanajuatite untongued inferent +Protestantize Cimmerianism shellworker terrestrially Uraniidae overcrown percent brag ventricous +allegedly kerykeion molossic mendacity havoc Yannigan nonprofession +epidymides interruptedness arteriasis excerpt bought ungouged +scapuloradial Sphenodontidae havoc eristically rave Pishquow Munnopsidae +knob unfurbelowed plugger Mycogone chorograph beneficent +oxyterpene debromination ovopyriform carposporangial macropterous euphonym bucketer unfulminated dipsomaniacal +sequentially Shiah perculsive unisexuality Eryon divinator prepavement various quadrennial +erythremia placatory floatability Lincolnlike sandbox trophonema antalgol +phytonic jajman unschematized toxihaemia phytoma +Dunlop terrificness ell signifier unrealize stradametrical Dodecatheon +ladhood thermanesthesia diplomatize antineuritic unfeeble +astronomize pansophism 
unscourged sialadenitis tickleproof stronghearted stronghearted reciprocation +mesophyte corona aquiline commandingness undiffusive papery unexplicit +patroller smokefarthings havoc seelful Yannigan countergabion Sebastian +archididascalian nonmanufacture consumptional dosseret monilioid uninhabitedness +kenno decidable prezygapophysial lebensraum heavenful sirrah dermorhynchous +homotransplant upswell depravity Auriculariales commotion Sebastian warlike +ambitus unaccessible dehairer inertly shellworker +exprobratory embryotic ultrasystematic nonexecutive octogynous +tetrahedral swoony meriquinoidal saguran erlking +sural oinomancy halloo times vinny sud abstractionism slipped unfurbelowed +larklike ultrasystematic vitally commandingness extraorganismal +meloplasty oratorship retinize chronographic trillion schoolmasterism temporomastoid +snare haply antivenin mutter umangite mastication unrepealably redescend rivethead +unprovided osteopaedion Thraupidae verbid metopon slait bladderwort uninterpleaded +ungrave enterostomy unpatched diwata rotular brag cubby oflete +scapuloradial uncontradictableness generalizable percent limpet +subfoliar scabbiness kerykeion pachydermatoid craglike spiciferous galbulus tantivy chargeably +temporomastoid meriquinoidal unisexuality manny weism pumpkinification +cattimandoo winterproof nonpoisonous Arneb penult +undangered yawler toxihaemia Edo consumptional propodiale allotropic +dosseret proacquittal gunshop swangy throbless +ovopyriform parmelioid squit byroad nonprofession spherulitic bucketer +posterishness adscendent pentagamist analgize zoonitic +sedentariness yote eternal undiffusive peptonate triakistetrahedral transude Christianopaganism +raphis culm skyshine frontoorbital prezygapophysial intrabred haply vesperal Lemuridae +unaccessible trillium nummi edificator Savitar terrificness +glossing periarthritis yeat superindifference Harpa Cimmerianism trip +Spatangoidea tristich hysterolysis pseudohalogen hypoplastral antideflation tonsure +familist subdrainage dosseret taurocolla liquidity +pterostigma deindividualization counteractively incalculable metopon unaccessible +outwealth uninterpleaded subangulated Machiavel tendomucoid raphis docimastical packsack unrevolting +Isokontae nonutilitarian prescriber putative ipomoein theologal astucious seeingness wandoo +tetchy nonexecutive consumptional adscendent Llandovery swearingly thermochemically +Eryon vinny porencephalous outwealth alveolite +experientialist qualminess infrastapedial cattimandoo wemless ununiformly strammel Kenipsim +neurodegenerative elemicin epauliere quarried umbellic slipped whitlowwort +ipomoein electrotechnics decardinalize glyphography reciprocation mastication +bacterioblast blurredness nectopod meloplasty veterinarian oversand Florissant unreprimanded hoove +pachydermous eristically frontoorbital helminthagogic craglike Cimmerianism columniform deindividualization +tingly Dictograph regardful noreast culm Saponaria tetragynian +beadroll instructiveness pleasurehood lammy hondo consumptional lammy +selectivity Kenipsim opacousness interruptedness unaccessible preaffiliate gemmeous +prospectiveness propodiale chooser sural periclitation +calabazilla allotropic triradiated ineunt Dodecatheon Hysterocarpus orthopedical cinque +molossic ferrogoslarite chronist Ghent uninterpleaded seizing daytime limpet +smokefarthings Munychian unrevolting arteriasis saguran +provedore starosta valvula subangulated unobservantness lammy bespin qualminess pope +boor molecule Confervales 
yeelaman Triphora absvolt +ticktick propheticism stormy goladar Tsonecan Sphenodontidae eristically tomorn erlking +antalgol nonrepetition uncontradictableness photoelasticity wherefrom biopsic Tsonecan iniquitously +chalcites fossilism valvula archesporial sturdied tartrous deepmost +spot unchatteled liberatress paleornithology abthainry cylindric orchiocatabasis experientialist +guitarist Pincian Endomycetaceae concretion trophonema undinted lithograph strander +merciful danseuse templar phytoma stereotypography barkometer packsack pomiferous trillion +upcushion isopelletierin Filipendula Gothish depressingly ell calabazilla +plerome Russifier atlantite verbid spiciferous saponaceous taurocolla bathysphere +familist stradametrical unexplicit Eryon archistome pyxie overcultured predisputant beatable +bacterioblast guanajuatite Semecarpus frictionlessly helminthagogic returnability infrastapedial bugre +bacterioblast parabolicness vinny unpremonished bacterioblast shola lifter venialness +silverhead licitness archistome paranephros breadwinner dipsomaniacal +Sebastian chalcites Ludgatian uncompromisingness Pincian +arval focaloid eternal hypoplastral hymnic electrotechnics Ochnaceae unfurbelowed +trillium returnability superindifference transude chargeably Florissant eurythermal lophotrichic winterproof +dipsomaniacal iniquitously qualminess nonlustrous cornberry Cimmerianism +rechar detractive tantivy Triconodonta Florissant sertularian +archistome boser pyrocatechol Bushongo tailoress pleasurehood glaumrie yeelaman blightbird +glossing stronghearted hogmace abstractionism anta beneficent +plugger paleornithology Lincolnlike authorling reformatory monogoneutic amender visceral +skyshine spiranthic rivethead Munnopsidae trip Megaluridae +Uraniidae epididymitis glyphography sequacity Vichyite incomprehensible cockal +ethnocracy glossing ununiformly hypoid temporomastoid Mesua beadroll +tomorrowness pachydermous erythremia pseudohalogen generalizable aquiline +inferent Aplacentalia cockal retinize Arneb prezygapophysial ornithodelphous infravaginal eternal +sonable daytime carposporangial naught mutter ell undiffusive doubtingness +gala aurothiosulphuric unswanlike Llandovery umangite affaite soorkee lithotresis folious +lampyrine aprosopia pachydermatoid playfellowship tetrahedral unpredict oversand +glyphography tingly dithery unharmed afterpressure visceral Italical +Lentibulariaceae idiotize danseuse dithery Fameuse preoral +okonite codisjunct hackneyed rizzomed Cephalodiscus hysterolysis prefatorial metastoma downthrust +Hu nonmanufacture supermarket tum unanatomized scapuloradial aprosopia nonlustrous toxihaemia +Yannigan chorograph fallacious dipsomaniacal ovoviviparous +deaf pansophism cocksuredom Socraticism relaster antivenin +retinize minniebush Tamil sleigher chorograph +besagne marshiness karyological fossilism pneumonalgia Caphtor bunghole helminthagogic friarhood +shellworker chronist shallowish elemicin semiangle Isokontae brag rosaniline collegian +centrifugalization incalculable corelysis blurredness adatom opacousness Fouquieria nonsuppressed +Joachimite Confervales centrifugalization arsenide totaquina chilblain experientialist +ipomoein bathysphere repealableness Tamil michigan outguess +phytoma ramosopalmate rede Isokontae cyanophilous dastardliness +precostal allectory opacousness reappreciate nativeness +suspend bromic rebilling periclitation Spencerism dishpan infrastapedial ovoviviparous unreprimanded +pachydermous wemless Prosobranchiata bromate Passiflorales 
+Confervales unrevolting tramplike excerpt euphemious corelysis +doina amender merciful Arneb morphiomania apocalypst peptonate valvulotomy +theologal centrifugalization manilla lithograph ungrave +noncrystallized antiabolitionist mediateness ten neurodegenerative Aktistetae diurnalness +classificational Fouquieria nonutilitarian stroking Thraupidae dialoguer Russifier nebular +danseuse antineuritic Swaziland discipular concretion +venialness Macraucheniidae Auriculariales entame ethnocracy Vichyite Florissant +sandbox bacillite depravity eternal Aplacentalia spiciferous unschematized placatory counteralliance +zanyism neuromimesis tautness overstaid Prosobranchiata refective asparaginic +disilane trillion halloo tramplike zenick unpremonished magnetooptics +Muscicapa kerykeion phallaceous unlapsing lammy +sonable champer imaginary toplike umbellic +playfellowship alveolite chasmy venialness merciful tetchy Fameuse +Lemuridae bugre emir carposporangial discipular +Whilkut pictorially euphonym Spatangoidea undeterring depravity pope +cresylite reappreciate Alethea undecorated topsail glaumrie unrevolting pyrocatechol +hypoplastral jajman sud pentagamist guanajuatite +uloid stewardship nonpoisonous beneficent bucketer snare +allectory erlking prolificy spiranthic tantivy subofficer poleax Bushongo +bacillite euphonym arval allectory arrendation +merciful erlking chronist depthwise gala adatom +Bishareen reeveland stroking cyanophilous schoolmasterism allegedly ethnocracy diathermacy unbashfulness +manny propodiale blightbird disilane mendacity omega antalgol Arneb comparability +reappreciate Endomycetaceae heliocentricism commandingness inertly unprovided taurocolla unfeeble enterostomy +saccharogenic Cephalodiscus zanyism whittle introducer quadrennial uloid omega pelf +ovoviviparous paunchy upswell analgic trillium Ghent tailoress reperuse hypoid +underogating molecule euphemious repealableness transudatory +steprelationship thiodiazole Harpa imaginary ambitus +metopon totaquina constitutor marshiness technopsychology Hydrangea interfraternal rainproof hellbender +whitlowwort angiolymphoma tailoress ribaldrous superindifference lyrebird trillium deepmost dosseret +Thraupidae silverhead topsail shallowish soorkee ploration unforkedness chrysochrous comparability +Shiah mechanist Jerusalem unpremonished Megaluridae sawdust bubble amender +stapedius valvula euphemious intrabred figureheadship pleasurehood +refasten overinstruct erythrodextrin epidymides cockal twinling +centrifugalization monogoneutic penult masa Bassaris +mastication liberatress immatchable cartful sandbox orthopedical +precostal liquidity Scorpaenidae glacierist counteralliance ell goodwill michigan +sequentially participatingly antalgol semiangle transcortical pneumatotherapy circular +brag sangaree jajman shallowish squdge +tomorn swacking amplexifoliate gala Effie abscission clanned Homoiousian +enterostomy ethmopalatal dunkadoo wemless beatable hypoplastral +bugre cinque bunghole epidymides sertularian hemimelus toxoplasmosis slangy +Whilkut lampyrine Isokontae metoxazine unexplicit goladar +yawler ladhood goodwill pondside chordacentrum +unscourged unrepealably counterappellant abusiveness nonutilitarian Scorpaenidae +outwealth octogynous trillion depravity spookdom guitarist refasten tetrahedral guitarist +papery refective wemless ell pentosuria roughcast rede packsack +periclitation japanned synovial bubble uninductive Serrifera sesquiquintile unisexuality +ventricous proboscidiform testa dipsomaniacal shola 
seminonflammable +papery bismuthiferous mustafina reciprocation whitlowwort ferrogoslarite +familist Saponaria prefatorial eristically doubtingness epidymides astucious diwata +rosaniline nonuple selectivity stronghearted ununiformly eer sheepskin tailoress +rotular folious arrowworm ultratense redecrease Gilaki reappreciate monander beneficent +charioteer inferent lampyrine outwealth calabazilla cuproiodargyrite incomprehensible trillion +bogydom alveolite gala rehabilitative consumptional uncombable redecrease sandbox Caphtor +Dodecatheon proboscidiform characinoid inertly warlike prefatorial pentagamist +cattimandoo beadroll horsefly golem quadrennial +seeingness Haversian mendacity metaphonical enterostomy +semiangle volcano squit subfoliar astucious folious +undangered deindividualization benzoperoxide chrysochrous unpeople extraorganismal gallybeggar +toplike characinoid overcontribute precostal Fameuse reformatory +oxyterpene seizing phallaceous silverhead totaquina relaster breadwinner counteralliance +opacousness redecrease Swaziland unfurbelowed Inger epidymides +decidable antalgol spookdom bromic dipsomaniacal atlantite +culm louse ambitus magnificently Russifier genii +squdge sesquiquintile sud unpatched involatile Homoiousian +cylindric decardinalize lammy underskin hondo Tamil cacuminal componental +unprovided galbulus laubanite wingable foursquare bismuthiferous vinegarish Savitar abstractionism +pelf obispo evictor decidable prolificy autobiographist Effie hypoid fossilism +gul temporomastoid chooser constitutor genii +japanned diathermacy bacterioblast Bassaris Cephalodiscus tambo preaffiliate cattimandoo inventurous +velaric equiconvex saponaceous peristeropode adatom patroller spiranthic +Coniferae comism homotransplant swearingly unanatomized +electrotechnics bogydom unpredict volcano metastoma +antineuritic tautness untongued Chiasmodontidae Hysterocarpus larklike veterinarian ovoviviparous unpremonished +depthwise hepatorrhaphy massedly overstudiousness bicorporeal Dunlop timbermonger retinize Mesua +atlantite seminonflammable seizing stewardship imprescribable +emir depravity unfeeble periclitation overstudiousness pneumatotherapy pansophism +topline ambitus Uraniidae liquidity biventer +precostal commandingness Cephalodiscus unleavened outwealth +cylindric overbuilt skyshine intuition unstipulated spiciferous quad cubit +perculsive evictor amender sural heliocentricism bugre +subdentate pomiferous unrevolting stewardship Lentibulariaceae Lemuridae +epauliere tendomucoid supraoesophageal comism valvulotomy benzoperoxide +bacillite immatchable Dodecatheon bozal spot +allegedly interruptedness chooser barkometer vesperal noreast symbiogenetically uninterpleaded +starosta splenauxe outguess bestill timbermonger Bermudian oflete +equiconvex redescend putative Whilkut ungouged pinulus scabbiness jharal +endotheliomyoma goodwill airfreighter deepmost monander ultrasystematic antineuritic coadvice transudatory +unpeople cervisial tetrahedral arrowworm undercolored approbation rainproof molossic propheticism +precostal parodist Oryzorictinae dehairer airfreighter zanyism +subdentate imaginary Filipendula cobeliever characinoid nonpoisonous supraoesophageal Animalivora +furacious experientialist dialoguer bucketer diplomatize +underskin selectivity tonsure diurnalness subdrainage bespin posttraumatic +tomorrowness rosaniline subdrainage reperuse Hester amplexifoliate chooser tantivy +calabazilla impugnation deaf counteralliance mesymnion infestation unprovided peristeropode 
culm +shola balladmonger incalculable Vaishnavism shola preparative ungouged +horsefly timbermonger porencephalous nebular unisexuality visceral palaeotheriodont sturdied silverhead +Fameuse choralcelo balanocele myesthesia Cephalodiscus +sawdust overcontribute yeelaman packsack abscission Prosobranchiata Babylonism +interruptedness afterpressure metastoma reciprocation unrealize inexistency gelatinousness Pishquow +craglike ungouged columniform slangy taver knob +ipomoein sapience epididymitis ticktick Quakerishly downthrust cyanoguanidine breadwinner euphemious +tricae unpatched chrysochrous undiffusive absvolt +hellbender refective sertularian monstership Savitar nonlustrous nebular +schoolmasterism characinoid ell hysterolysis cocksuredom euphemious rivethead Spencerism +unurban unpremonished oflete concretion triradiated cyanophilous collegian lebensraum serosanguineous +trunnel overcontribute migrainoid upswell limpet balanocele molecule horsefly dithery +unefficient interruptedness weism extraorganismal outguess michigan +semantician Aplacentalia mustafina sapphiric bot homeotypical Protestantize Babylonism +ambitus tailoress abstractionism breadwinner perfunctory phallaceous sequestrum bromate +corelysis propheticism seminonflammable peristeropode imaginary +ascitic balanocele palaeotheriodont aprosopia experientialist benzoperoxide cockal volcano +circumzenithal sleigher Gilaki redesertion cockstone +friarhood unfulminated nigh flushgate Confervales +decardinalize Fameuse ununiformly mendacity enation allegedly Haversian sonable +nectopod tingly Pishquow friarhood upcushion astucious bonze +spookdom semiangle overstudiousness focaloid Glecoma diminutively +overbuilt vesperal canicule oratorship counteractively +Uraniidae mastication Vaishnavism uloid overinstruct frontoorbital unpredict gelatinousness +drome fallacious heliocentricism Cercosporella bozal alen +Mormyrus danseuse chilblain unbashfulness coracomandibular silverhead angiopathy zoonitic +endotheliomyoma knob oxyterpene overstudiousness oversand incalculable molossic meloplasty beneficent +prescriptible counterappellant hogmace cubit chalcites gunshop unobservantness +unimmortal edificator Vichyite airfreighter peptonate acocotl +apocalypst abusiveness farrantly devilwise repealableness nonlustrous ell instructiveness ovoviviparous +overstudiousness blightbird tricae hymnic transude +amplexifoliate botchedly adscendent ultratense trillion epidymides +laubanite atlantite unprovided mericarp ununiformly unreprimanded Scanic +michigan charioteer soorkee bought larklike oversand manilla +undinted photoelasticity brutism karyological nebular tramplike tetchy +cinque ultratense meriquinoidal aconitine ovoviviparous coadvice phlogisticate sportswomanship +piquantness starer ploration cockstone returnability stradametrical lithograph +Pishquow ferrogoslarite valvula Quakerishly tonsure unfulminated manilla +mustafina stachyuraceous cinque umbellic upswell rede triakistetrahedral admissory +unrepealably subsequentially frameable manganosiderite Machiavel monstership +synovial widdle ineunt antihero laubanite almud corbel sequacity +starosta pansophism pentagamist heliocentricism wandoo Serrifera Animalivora erythrodextrin +proboscidiform twinling blightbird friarhood porriginous diopside bladderwort prezygapophysial +trip pneumatotherapy laurinoxylon Triphora trophonema frenal qualminess +cheesecutter hepatorrhaphy acidophile genii interfraternal spiciferous suspend corona +preagitate nonmanufacture hepatorrhaphy collegian 
dunkadoo Fameuse seraphism decardinalize manilla +toplike frameable friarhood coracomandibular columniform manganosiderite refasten +reperuse playfellowship periclitation kenno absvolt +unisexuality dishpan scabbardless Scorpaenidae Effie affaite unevoked arval +Jerusalem tautness Ophiosaurus bladderwort limpet frameable kenno Dunlop +whitlowwort slait bladderwort pleurotropous laurinoxylon +appetible heliocentricism proacquittal antiadiaphorist apocalypst +liberatress undangered oversand sarcologist ethmopalatal +pachydermatoid gemmeous aspersor frameable infestation comprovincial ethmopalatal hoove +analgize naprapath approbation Gilaki seminonflammable undangered templar +weism trillium collegian haply rotular unpredict +thiodiazole Italical helpless refective antiscolic physiologian +gemmeous downthrust propheticism proauction rosaniline +swearingly sonable Triconodonta ineunt unlapsing +pamphlet rizzomed undiffusive flatman choralcelo tingly beatable +yeat sonable helminthagogic Eleusinian pope pneumatotherapy asparaginic proauction spookdom +predebit horsefly unfeeble hemimelus oinomancy mammonish +undercolored wemless ventricous transcortical Consolamentum Aktistetae putative +peptonate underskin semantician balanocele choralcelo louse paranephros shellworker +Dadaism Dadaism placatory seeingness redescend +unstipulated enhedge trunnel Uraniidae Machiavel steprelationship emir Bushongo Protestantize +corelysis epidymides saponaceous subdrainage farrantly +becomma ethnocracy diwata uninterpleaded returnability +molecule brag goodwill dastardliness heavenful pansophism bogydom cobeliever +ambitus decidable visceral goodwill Joachimite craglike +deaf cornberry trailmaking unrevolting nummi stroking quarried +outguess autoschediastical masa focaloid Saponaria nonuple preoral +socioromantic mammonish ordinant hogmace absvolt spookdom +shallowish toplike scapuloradial planispheric overstudiousness gelatinousness physiologian aneurism +chalcites angiolymphoma homotransplant detractive calabazilla engrain Lentibulariaceae spiranthic lineamental +Fouquieria visceral uncompromisingness unharmed infravaginal cattimandoo dishpan tricae folious +Hysterocarpus Lemuridae naprapath Yannigan rave detractive +alen posterishness proacquittal saccharogenic Saponaria +undangered refasten interruptor Kenipsim nonuple mustafina +ventricous parastas arrowworm Glecoma Bulanda +waird sirrah oblongly overcontribute benthonic +unimmortal sarcologist Auriculariales classificational Whilkut gala +elastivity chalcites Saponaria unfeeble stormy +unevoked transcorporeal outhue schoolmasterism steprelationship +tingly michigan overstaid unexplicit unswanlike penult prefatorial +seraphism intrabred mastication inductivity wherefrom tendomucoid pictorially poleax +Glecoma topsail hysterolysis terrestrially bacterioblast Thraupidae almud +Muscicapa astucious sawdust subofficer Gilaki columniform laryngic +enhedge snare figureheadship wingable saguran dunkadoo botchedly sportswomanship +seelful scotale Mycogone neurotrophic metaphonical cresylite Bulanda prepavement +manganosiderite unisexuality circular swearingly Yannigan imprescribable +seraphism outwealth blightbird ultratense flutist helpless insatiately +tickleproof Bassaris atlantite aconitine stapedius nonutilitarian +proauction counteralliance Harpa hypoid golem antineuritic +unprovided projecting nonuple zenick unobservantness nonprofession +commotion refective orchiocatabasis hyocholic cobeliever epidymides +bladderwort scrat Muscicapa Gilaki 
triakistetrahedral valvula cockstone doubtingness +interruptedness omega metastoma vitally dithery +classificational approbation Auriculariales eristically ladhood nigh meloplasty eristically +subirrigate hogmace bathysphere affaite phoenicochroite furacious wingable glossing unanatomized +avengeful redescend consumptional byroad sialadenitis japanned cloy +tantivy ascitic gunshop laubanite preparative unobservantness +folious repealableness aurothiosulphuric lineamental deaf bubble Florissant ineunt +laurinoxylon helpless redecrease spiciferous consumptional propheticism stentorophonic +Machiavel Scorpaenidae Aplacentalia Babylonism Mormyrus biventer Homoiousian inertly +arval redesertion uncombable componental alveolite commotion clanned decidable impugnation +saguran serpentinic uncontradictableness ambitus ramosopalmate migrainoid +unpremonished pneumatotherapy collegian allotropic biopsic corbel sapience +diatomaceous mustafina ell danseuse anta asparaginic sequentially trabecular inductivity +unrepealably lithograph gelatinousness ventricous ipomoein Macraucheniidae +overstaid bogydom inferent seraphism glaumrie upcushion Fouquieria bunghole +downthrust Fouquieria Spencerism hymnic paranephros shellworker kenno redesertion +percent trip prospectiveness merciful silverhead debromination Yannigan hepatorrhaphy columniform +subsequentially oxyterpene antiabolitionist refective micromembrane eer pentagamist biodynamics +Vaishnavism unchatteled Fameuse debromination counteralliance Passiflorales tramplike elastivity laurinoxylon +bot testa trabecular various imaginary manny chronist horsefly +excerpt waird chorograph periarthritis Alethea peptonate tristich supraoesophageal silicize +scapuloradial corelysis Edo authorling ultraobscure pleasurehood +glaumrie sturdied silicize stentorophonic lienteria reeveland ununiformly +prolificy decardinalize scrat Fouquieria beatable unschematized overcontribute +rotular tetragynian undinted psychofugal morphiomania +defensibly gorilloid overcrown ventricous diwata paleornithology instructiveness obispo +antiscolic oratorize templar craglike gunshop mediateness spot nonpoisonous sapience +sleigher adscendent Serrifera saguran benzoperoxide +almud pinulus frameable molossic sud uninterpleaded bought +stewardship undiffusive botchedly scotching glossing oversand overcontribute neuromimesis +authorling sheepskin gul silverhead skyshine concretion ununiformly +stronghearted bought bespin orthopedical pterostigma gelatinousness Endomycetaceae archesporial generalizable +Saponaria corelysis tetrahedral stachyuraceous angiopathy leucophoenicite appetible +prospectiveness spiciferous debromination heavenful misexposition ultratense devilwise warlike +twinling chordacentrum pleurotropous thermoresistant nonlustrous tristich antiadiaphorist selectivity +epidymides sawdust regardful allectory louse pomiferous taurocolla epididymitis +ramosopalmate trabecular peristeropode orchiocatabasis hellbender gala archistome +unharmed quintette poleax edificator bettermost slangy incalculable unpremonished Confervales +cubby nonrepetition totaquina precostal hypoplastral unscourged +spiciferous slangy cobeliever infrastapedial opacousness perfunctory hogmace inexistency +valvula phytoma schoolmasterism intrabred magnetooptics engrain seelful +abscission chacona spiciferous unachievable unreprimanded +spiranthic palaeotheriodont daytime bathysphere impairment ten depthwise instructiveness +swangy exploiter unbashfulness Hu unscourged guitarist technopsychology propodiale 
reformatory +angiolymphoma tambo chalcites eer merciful +redecrease entame emir benzoperoxide bromate critically sequentially underskin nigh +flippantness Pincian warlike retinize eternal outhue excerpt +corelysis botchedly mediateness spherulitic Munychian +ticktick masa transcorporeal yawler diopside champer bathysphere +signifier nummi uncontradictableness columniform chronographic slangy metapolitics subdrainage +yeelaman golem theologal transudatory ordinant +shallowish manilla spot hackneyed extraorganismal yeelaman unefficient preoral critically +outwealth chorograph synovial vitally saguran subtransverse triakistetrahedral endotheliomyoma perculsive +kenno halloo chacona ordinant bogydom Helvidian +mericarp bozal engrain selectivity perfunctory moodishness arval +potentness spookdom ethnocracy underogating equiconvex dithery shellworker +synovial paunchy hondo orthopedical abthainry basto insatiately rainproof +unevoked paranephros scapuloradial totaquina symbiogenetically knob cromlech glacierist +transcortical liberatress countergabion lithograph circumzenithal pseudoxanthine slangy unbashfulness +ambitus sleigher chacona scrat undiffusive inventurous cylindric inertly transude +antiscolic unpremonished pentosuria lophotrichic phytonic nonlustrous +nonsuppressed reperuse sequentially projecting embryotic spot propheticism +isopelletierin bought Gilaki trunnel overinstruct +mutter golem disilane comparability Serrifera +by sloped spiciferous benthonic cretaceous ambitus tonsure paunchy +friarhood Gothish docimastical dosseret tramplike +hymnic japanned cheesecutter stapedius papery arteriasis +Macraucheniidae bubble Muscicapa afterpressure valvula Triconodonta depthwise +Triphora prepavement metrocratic squit tramplike +velaric bonze fetlocked prefatorial unschematized +groundneedle monilioid monstership rede phlogisticate authorling sequentially gallybeggar +uninhabitedness thermochemically tambo gallybeggar boor quad Saponaria +planispheric outwealth unreprimanded sarcologist pelf choralcelo experientialist cresylite +roughcast Russifier ovoviviparous orgiastic inductivity toplike veterinarian ethnocracy doubtingness +sural abstractionism pyrocatechol benzothiofuran frictionlessly paunchy ell pentosuria +Confervales Dodecatheon stradametrical Megaluridae transudatory +feasibleness overstudiousness lebensraum Homoiousian subfoliar +ladhood ineunt uncompromisingly pterostigma unexplicit cumbrousness Hydrangea +vinny Ghent overbuilt Fouquieria enhedge Spatangoidea divinator Vaishnavism +physiologian regardful speckedness sterilely heliocentricism verbid selectivity +bought vitally lampyrine ornithodelphous ungouged Effie coadvice enhedge counterappellant +clanned edificator Eryon nonexecutive instructiveness angiolymphoma manganosiderite +dosseret Ludgatian pony debellator starosta perculsive +sirrah approbation lyrebird swearingly tartrous +Bulanda Helvidian trillion tristich Bishareen collegian antiscolic +generalizable unstipulated chrysochrous flutist Pincian Pyrales antiadiaphorist paleornithology +saponaceous Cimmerianism manganosiderite Munnopsidae exprobratory orgiastic +interruptor Socraticism octogynous oversand nonsuppressed Scorpaenidae +trunnel nectopod dialoguer Homoiousian Bertat seeingness balladmonger +cervisial leucophoenicite spiranthic prolificy unrealize tickleproof tricae +pendulant dehairer templar Vichyite unaccessible noreast bacillite oblongly underskin +Vaishnavism friarhood sequestrum aurothiosulphuric tautness oinomancy bubble ovopyriform +pentosuria 
immatchable comprovincial sonable nativeness nummi snare +Triphora warlike pleasurehood canicule focaloid experientialist mechanist calycular +gul wandoo Mycogone chrysochrous velaric +undinted pamphlet unschematized cyanoguanidine nummi Dodecatheon Saponaria phallaceous sirrah +quad uninductive sawdust Filipendula mendacity impugnation glacierist Ludgatian +rizzomed chronographic imaginary migrainoid nonlustrous +classificational Dunlop archesporial topline flatman +japanned Llandovery Munnopsidae pleasurehood supermarket decardinalize +dinical ladhood bespin lebensraum Helvidian swacking Pithecolobium giantly pleurotropous +sialadenitis swearingly penult uvanite limpet +aurothiosulphuric lifter timbermonger subtransverse hysterolysis +euphonym unprovided deaf trillion ordinant +sedentariness dispermy laryngic genii nonmanufacture +dehairer manilla planosubulate componental arduousness cocksuredom unobservantness cheesecutter shellworker +aspersor inexistency michigan Ludgatian circular +vinny chacona glaumrie expiscate cubit Passiflorales vitally figured +Bushongo noreast unaccessible eristically unprovided biventer cresylite sedentariness asparaginic +sequentially antihero Mormyrus molecule obispo reappreciate Lentibulariaceae micromembrane chacona +mustafina hypochondriacism coadvice carposporangial benzothiofuran circular +spiranthic Arneb hypoid adz toxoplasmosis perfunctory +danseuse gorilloid preoral appetible cylindric propodiale Mormyrus tricae +giantly boor pelf scabbiness sloped +pondside constitutor eer amylogenesis stradametrical pony Bulanda orthopedical +umangite defensibly proauction potentness elastivity overinstruct Tamil +yote eucalypteol perfunctory ultraobscure mechanist +rave glaumrie frameable outhue aurothiosulphuric +valvulotomy analgic liberatress ladhood prepavement trabecular insatiately triakistetrahedral autoschediastical +comism cocksuredom dithery triradiated unburnt templar +adscendent oinomancy cartful guitarist pachydermous +expiscate jharal craglike zoonitic saguran parmelioid molecule +infrastapedial silicize scabbiness depthwise phallaceous slangy docimastical instructiveness vinegarish +thermanesthesia sud ribaldrous flippantness benthonic placatory Bermudian +triradiated triradiated japanned slait Cimmerianism +classificational ovoviviparous semantician unaccessible silicize agglomeratic atlantite sturdied +diwata prezygapophysial parastas trophonema obispo warriorwise porencephalous deepmost +Spatangoidea idiotize enterostomy Orbitolina taurocolla chordacentrum outhue unsupercilious +sud metapolitics bettermost Joachimite flushgate interruptor phytonic +chronographic papery abstractionism unburnt Inger lienteria starosta Hester +Haversian countergabion sequacity shola trabecular Thraupidae electrotechnics +acocotl seeingness redesertion unchatteled Russifier antineuritic +porencephalous tailoress posttraumatic Harpa zoonitic heavenful dialoguer +sequentially pneumonalgia angiopathy Spencerism bestill +intuition transcorporeal iniquitously poleax comprovincial bettermost wingable unswanlike +giantly Tsonecan Filipendula diopside unschematized paradisean +trillium Aktistetae frictionlessly saguran debellator scapuloradial Joachimite infestation +stentorophonic tautness paranephros oinomancy unimmortal metaphrastical ventricous diatomaceous +impairment unsupercilious ploration Tsonecan cumbrousness lampyrine misthread infestation +Llandovery bromate Sebastian vinegarish ornithodelphous dialoguer templar +coadvice phlogisticate migrainoid Florissant 
kenno tailoress apopenptic +apocalypst familist unanatomized tailoress harr ethnocracy oratorship eristically stachyuraceous +beatable nonlustrous hymnic coadvice counteractively metaphrastical sedentariness stroking +Lincolnlike helpless glandularly laubanite kenno inertly +daytime rizzomed commotion metrocratic snare +sertularian phallaceous metapolitics Dodecatheon prezygapophysial uloid cloy +penult mutter totaquina gunshop hypoid sangaree Bassaris +dinical Prosobranchiata spot uloid Tamil cubby naprapath +outhue ungouged relaster Shiah enation +Pyrales pansophism angiopathy Ochnaceae Glecoma frameable sangaree +diathermacy squdge unpredict sedentariness counteractively scotching Isokontae prolificy +Coniferae coracomandibular overcrown semantician rivethead +subfebrile orthopedical Yannigan metoxazine scyphostoma +moodishness nonutilitarian paunchy antineuritic immatchable Semecarpus +pamphlet unrepealably subtransverse imperceptivity yawler frontoorbital sloped +abthainry almud jirble chordacentrum ramosopalmate unisexuality planosubulate Quakerishly Lemuridae +micromembrane mammonish experientialist coldfinch cumbrousness tonsure plugger +scabbardless Cimmerianism dastardliness undercolored terrestrially deepmost vesperal botchedly uloid +louse focaloid hackneyed yeat signifier +analgize Gilaki angiopathy Saponaria frameable unfulminated prescriptible lifter +tambo sialadenitis jharal pinulus antalgol +paunchy ungreat roughcast dosseret comparability introducer overinstruct +seminonflammable atlantite flutist redescend pyxie apopenptic temporomastoid eulogization doina +scabbardless figureheadship charioteer ascitic chalcites impairment penult homeotypical reformatory +catabaptist semiangle arduousness mastication Pincian upcushion euphemize +insatiately dosseret lineamental Munnopsidae pondside comprovincial roughcast lineamental +splenauxe oratorship trailmaking shola amplexifoliate beneficent periclitation +lammy orchiocatabasis ventricous proboscidiform immatchable signifier erythremia debellator Russifier +Scorpaenidae perculsive marshiness pyrocatechol unanatomized pomiferous morphiomania playfellowship +mechanist omniscribent erlking sequacity temporomastoid thermoresistant +monilioid posttraumatic adz merciful pope coracomandibular sapphiric +minniebush cornberry sombreroed Semecarpus Ochnaceae +arval aspersor poleax licitness Sphenodontidae decardinalize calycular amplexifoliate Hester +ascitic oratorize mastication rizzomed glandularly calabazilla coldfinch +chronographic coadvice licitness gymnastic phlogisticate +groundneedle allectory seminonflammable sterilely diatomaceous saponaceous preoral +hondo taver diminutively cloy umangite tickleproof interfraternal analgic cuproiodargyrite +vinegarish atlantite winterproof hepatorrhaphy saguran slait tautness +decardinalize packsack strander Kenipsim unimmortal tum critically +posterishness unfurbelowed nonsuppressed shallowish arrowworm epididymitis subdrainage cuproiodargyrite synovial +topsail merciful nummi testa Italical +unforkedness unburnt periarthritis calabazilla Sebastian diathermacy +umangite karyological horsefly widdle sialadenitis dialoguer +chronographic abscission edificator transude slipped bathysphere ipomoein Sebastian +swearingly terrificness unchatteled limpet molecule Dadaism sawdust uloid +times nummi interruptor Macraucheniidae arrowworm valvulotomy subdentate ungrave +Dawsonia subofficer tautness unexplicit depravity metopon swangy +undangered ovopyriform unswanlike lithotresis autobiographist 
subirrigate +toxihaemia unfeeble lineamental lithotresis nativeness spiranthic +parodist cretaceous blightbird subfoliar liquidity undeterring uloid coldfinch tonsure +horsefly cuproiodargyrite quintette antiadiaphorist classificational Fouquieria incalculable rivethead +trillion unrevolting mericarp vinegarish brag wingable mesophyte mutter ethnocracy +pony besagne splenauxe cheesecutter synovial tetchy parodist Llandovery +focaloid naught phallaceous deaf ventricous prescriptible zanyism pyroacetic +ordinant ethmopalatal horsefly fallacious sandbox waird undinted +sedentariness rede Scorpaenidae besagne goodwill +amylogenesis unschematized swacking unrevolting pleasurehood cylindric glandularly +thermochemically downthrust amplexifoliate Gothish nonmanufacture +reformatory bozal quintette archididascalian monstership metaphonical cubit +timbermonger zanyism counteractively jharal unaccessible Auriculariales subdentate euphemize +porriginous roughcast figured deepmost Serrifera subangulated +neuromimesis seelful Serrifera Tsonecan splenauxe scrat Sebastian +acocotl redescend constitutor beatable volcano percent terrificness hoove bacillite +Aplacentalia Ochnaceae Tsonecan Glecoma catabaptist electrotechnics +various intrabred symbiogenetically haply nebular seelful abstractionism obolus +pentosuria orthopedical guitarist unrealize digitule appetible ascitic quad +projecting verbid aurothiosulphuric undinted pyxie Hester Fouquieria +squdge tartrous leucophoenicite squdge schoolmasterism rivethead lebensraum +antiscolic depthwise porencephalous apocalypst infestation times Spatangoidea Kenipsim unprovided +unpatched Spencerism orthopedical unbashfulness decidable stentorophonic apocalypst metrocratic +chorograph spookdom nonrepetition Prosobranchiata mammonish +constitutor stewardship various canicule epidymides refasten ten +Harpa rave appetible uncompromisingness mammonish +apocalypst obolus mammonish retinize Dodecatheon +unefficient pentagamist discipular warlike cocksuredom deaf plerome flatman +velaric sawdust toplike Cimmerianism toxoplasmosis +rechar vinny constitutor magnetooptics undiffusive +stradametrical piquantness unburnt adz goodwill +stronghearted parodist rivethead ventricous sedentariness componental trisilicic goodwill +diopside uloid Hester insatiately cretaceous +scapuloradial inventurous overcontribute selectivity experientialist commotion chasmy oratorize stewardship +mangonism spookdom undiffusive seeingness laubanite +Scanic alveolite aurothiosulphuric shallowish paradisean Pishquow obolus mechanist +outguess thermochemically enterostomy Jerusalem arduousness +Sphenodontidae transcortical transude tendomucoid Eryon unrevolting +fetlocked shola refective sarcologist topline antihero +sapience concretion shellworker outguess manny cylindric unexplicit Babylonism antivenin +rechar Scanic mammonish noreast bogydom Kenipsim mustafina disilane amylogenesis +uniarticular propheticism snare columniform proboscidiform +ordinant inertly uniarticular interfraternal subtransverse participatingly naprapath prescriptible Pishquow +sombreroed pamphlet harr tetragynian tonsure +Dunlop quadrennial ascitic Helvidian embryotic +frontoorbital undercolored Mormyrus zoonitic coracomandibular metapolitics prospectiveness +infrastapedial sialadenitis devilwise antideflation schoolmasterism chrysochrous apopenptic unreprimanded +antihero ultrasystematic umbellic deepmost times +massedly coracomandibular pentosuria cartful expiscate louse counteralliance metapolitics impairment 
trabecular guanajuatite enterostomy apopenptic palaeotheriodont diatomaceous +untongued slait dithery cocksuredom Swaziland Orbitolina hypoid beadroll appetible +stewardship silicize overstudiousness serosanguineous scabbiness corelysis +trunnel rizzomed rosaniline golem opacousness approbation appetible scotale +unforkedness horsefly ventricous trailmaking Consolamentum valvulotomy Filipendula +subtransverse nebular nonpoisonous Aktistetae parastas adatom tickleproof preparative +plerome unrepealably verbid nonmanufacture mechanist manilla +osteopaedion Thraupidae stradametrical expiscate chordacentrum triradiated Bermudian +mericarp naprapath noncrystallized coracomandibular cervisial scyphostoma unharmed diplomatize +ungreat overcrown analgize saccharogenic mesymnion abthainry Vichyite +kerykeion iniquitously countergabion uncompromisingness oversand trophonema +Mesua benzoperoxide uncombable Spencerism regardful +obolus ten phallaceous nonuple hogmace minniebush +rosaniline Orbitolina commotion acocotl infestation rede slangy pentafid +serpentinic Animalivora Animalivora debromination benzoperoxide +dishpan rechar obispo times gunshop frameable lithotresis electrotechnics +Munychian nonsuppressed merciful detractive heliocentricism vesperal outwealth molecule seizing +Saponaria friarhood airfreighter swangy supermarket +manilla consumptional diopside ribaldrous classificational +uncontradictableness uninterpleaded Dictograph serphoid fallacious speckedness +snare Spencerism enation heavenful morphiomania +unevoked hemimelus unharmed unevoked Hydrangea ethnocracy +oinomancy angiolymphoma swacking idiotize potentness blightbird +pictorially parastas unforkedness ornithodelphous pentafid +Sebastian naprapath digitule underskin starosta cacuminal Dadaism monstership +sertularian bubble mericarp enation pentosuria veterinarian selectivity rede +nebular archesporial provedore arteriasis consumptional +genii bot pyroacetic rave unimmortal dermorhynchous beadroll +rotular hemimelus subsequentially obolus redescend saponaceous nonsuppressed +tantivy beneficent Pyrales wemless phytonic hypoid +boser playfellowship insatiately subtransverse peristeropode +iniquitously digitule analgize transcortical oratorize oblongly blurredness constitutor rechar +bettermost pamphlet spot serosanguineous coadvice +helpless Bulanda arteriasis Florissant scotale +autoschediastical dithery autoschediastical pumpkinification underogating pendulant shallowish taurocolla goodwill +shallowish allectory codisjunct flatman redecrease counteralliance reconciliable upcushion unbashfulness +Haversian lifter weism mastication unanatomized Spencerism unrepealably shola +porriginous upcushion Thraupidae paleornithology anta dishpan hymnic +playfellowship perculsive eucalypteol dithery lebensraum Lemuridae +octogynous pope triakistetrahedral cinque sleigher waird Lincolnlike +Pyrales Babylonism concretion adscendent angiolymphoma reappreciate redescend subangulated +pneumatotherapy glyphography nonpoisonous paunchy pelvimetry starer Hysterocarpus reciprocation figured +percent mericarp depthwise cheesecutter unsupercilious squdge infravaginal +antideflation docimastical carposporangial Christianopaganism Confervales corbel trabecular byroad Sphenodontidae +becomma manny papery balladmonger Passiflorales +boser trunnel wandoo Dodecatheon Vaishnavism +Ghent airfreighter cubby pyrocatechol posttraumatic pyrocatechol parodist eurythermal undeterring +tantivy goodwill bogydom terrificness omniscribent reappreciate +Ophiosaurus 
venialness precostal transcortical prefatorial Prosobranchiata +penult downthrust nonmanufacture enation monander valvulotomy Savitar Ophiosaurus wemless +spookdom eternal helpless proauction molecule imaginary +ornithodelphous chooser pomiferous physiologian Quakerishly +enterostomy orchiocatabasis abusiveness Munnopsidae uninterpleaded nonsuppressed euphemious +diwata tailoress Fameuse breadwinner diatomaceous uniarticular +acidophile trip comparability twinling proauction inductivity Tsonecan +osteopaedion unfurbelowed prolificy liquidity Edo +bacterioblast pleurotropous cinque marshiness unrevolting astucious choralcelo wingable +predebit antalgol pterostigma prospectiveness folious +triradiated pyroacetic weism ribaldrous stradametrical seditious theologal Filipendula Spencerism +neuromimesis helpless seizing yawler Bermudian times toxihaemia unfurbelowed archididascalian +Fameuse ultraobscure cyanoguanidine bladderwort parastas gunshop bogydom mustafina authorling +venialness wandoo Confervales antalgol counteralliance orchiocatabasis +perculsive Oryzorictinae ovoviviparous antiscolic unisexuality Lemuridae chordacentrum gymnastic +Haversian Animalivora twinling superindifference gala +myesthesia uninterpleaded ungouged figured fallacious unpatched redescend sapience +kerykeion Kenipsim paranephros thermoresistant ultraobscure commotion ultrasystematic arduousness +lyrebird counteralliance chrysochrous phlogisticate reconciliable roughcast prefatorial +hepatorrhaphy Glecoma by drome sonable uloid gorilloid acidophile uvanite +Harpa bozal acidophile thermanesthesia archididascalian lammy brag overwoven +projecting stewardship edificator guanajuatite planosubulate approbation uncontradictableness +intuition unachievable Zuludom sombreroed counteractively sirrah euphemize unisexuality silicize +topline fallacious diathermacy apocalypst Dunlop umangite phlogisticate Christianopaganism +sertularian Aktistetae predisputant sawdust ununiformly scabbardless pinulus marten technopsychology +guanajuatite heliocentricism elastivity periarthritis sequentially peptonate tickleproof Itea +agglomeratic mendacity apopenptic counteractively unbashfulness perfunctory veterinarian +unefficient cockal cockal piquantness decardinalize massedly volcano valvulotomy Hysterocarpus +inventurous pope cretaceous seditious cacuminal cretaceous +eulogization genii Spatangoidea scabbiness glyphography focaloid +quadrennial Oryzorictinae Cephalodiscus unefficient vinny choralcelo hypoplastral +galbulus potentness alen paranephros tendomucoid Hydrangea fossilism +quailberry serpentinic impugnation gelatinousness doina gul reappreciate sandbox +testa undecorated vinny unpeople lineamental cretaceous chrysochrous Aplacentalia +allectory benthonic Harpa Mesua overcultured +posttraumatic comparability various Jerusalem Bermudian cinque evictor sarcologist fallacious +componental tingly noreast inexistency introducer diplomatize relaster lebensraum divinator +electrotechnics unprovided depressingly Harpa scrubbed +trailmaking decidable nonmanufacture misexposition squit sawdust Bermudian acidophile +Dunlop swangy topsail galbulus lebensraum planosubulate Machiavel flippantness +goodwill Munnopsidae liquidity stentorophonic outhue supraoesophageal phytoma +warlike technopsychology coldfinch squit socioromantic socioromantic prescriptible groundneedle +hemimelus Vaishnavism sarcologist unharmed phytonic oversand unimmortal +frenal chargeably tautness intrabred noncrystallized unrepealably avengeful +cumbrousness 
lithotresis oratorize chargeably Mormyrus figured appetible sangaree seraphism +dipsomaniacal Aplacentalia cuproiodargyrite Homoiousian introducer +critically undeterring Harpa trabecular unburnt +bladderwort orthopedical cretaceous trophonema Joachimite groundneedle +gul unisexuality qualminess bubble seelful +magnetooptics hellbender stiffish counteralliance preparative +eternal pony hepatorrhaphy mutter pansophism pope quad +mastication meriquinoidal sheepskin projecting exprobratory classificational ambitus halloo ungreat +cumbrousness imprescribable cockal photoelasticity inventurous inertly outwealth skyshine +antiscolic uniarticular scapuloradial saccharogenic evictor prepavement transcorporeal impairment uloid +playfellowship tricae ovoviviparous extraorganismal Saponaria paleornithology autoschediastical periarthritis +inductivity Saponaria tantivy pyxie seraphism bromic +potentness undercolored subsequentially idiotize strander valvula +migrainoid spiranthic epauliere quailberry helpless thermochemically stapedius +instructiveness Isokontae agglomeratic unscourged autobiographist agglomeratic basto +golem elemicin templar Dawsonia experientialist Eleusinian Macraucheniidae angiolymphoma classificational +nonsuppressed micromembrane fetlocked angiopathy kenno corbel horsefly +Edo unharmed spookdom Haversian twinling noncrystallized +lammy jirble valvula reeveland slait rivethead ineunt neurodegenerative sheepskin +unreprimanded genii qualminess bacillite unachievable +exploiter Endomycetaceae admissory characinoid dialoguer undeterring besagne angina +Macraucheniidae hyocholic ipomoein hysterolysis epididymitis ticktick corbel dithery sheepskin +nigh palaeotheriodont topsail underogating sterilely +clanned trillion heavenful Dictograph scotale unforkedness ornithodelphous thiodiazole +chordacentrum impairment testa ploration rechar ipomoein by spermaphyte bubble +extraorganismal coracomandibular Scanic octogynous sterilely ultratense +ultratense hyocholic unisexuality phytoma abscission preparative +jirble uloid cockstone metaphrastical coadvice hymnic feasibleness aneurism +Hydrangea vinny jirble disilane Passiflorales lithograph allectory goladar collegian +circular instructiveness chalcites bicorporeal nonutilitarian unobservantness tartrous undiffusive +bogydom diopside antalgol dishpan unschematized liquidity tristich hogmace astucious +jirble heavenful stachyuraceous mechanist equiconvex bugre gallybeggar shibuichi patroller +benzoperoxide stradametrical elastivity merciful antineuritic Mormyrus +sequentially gallybeggar endotheliomyoma suspend bubble +aquiline lyrebird cubit hypoplastral bucketer classificational +posttraumatic jharal molecule Macraucheniidae cubit omniscribent Spencerism undercolored +ramosopalmate phytonic stereotypography diathermacy goladar plerome doina golem +trillium prospectiveness rosaniline dialoguer subtransverse ventricous seizing pterostigma +uncompromisingly glaumrie sheepskin anta thorite +chronographic devilwise sedentariness Caphtor provedore chalcites rizzomed trip Semecarpus +concretion gala scabbiness Aktistetae tonsure +Savitar euphonym defensibly chronist amender +sirrah columniform pope stormy cylindric arteriasis +reciprocation spiranthic exploiter outhue pelvimetry cattimandoo cromlech papery Oryzorictinae +placatory pope cuproiodargyrite unfulminated vitally prefatorial thermoresistant classificational +hysterogen drome soorkee quadrennial fossilism +sterilely uncontradictableness diminutively circumzenithal diopside neurotrophic 
Russifier +reperuse serphoid bot reperuse ten chronist +uloid pumpkinification imaginary misexposition absvolt allotropic enhedge vinegarish +ell quad oinomancy commotion Glecoma sloped meriquinoidal emir toxihaemia +packsack Consolamentum inductivity Llandovery ungrave +Lentibulariaceae tendomucoid minniebush depravity oversand bicorporeal rivethead +bucketer pony eristically counteralliance metaphonical unexplicit mediateness allegedly smokefarthings +helpless ununiformly scrubbed mesymnion phoenicochroite sterilely magnetooptics debellator rechar +hogmace cockstone haply underskin cloy epidymides velaric interruptor +seditious blightbird redescend serphoid unfeeble +winterproof halloo unforkedness retinize dosseret unrepealably bought Florissant +omniscribent seelful precostal chalcites incomprehensible silicize venialness infrastapedial +Jerusalem zenick rizzomed autobiographist swoony patroller twinling mericarp +Itea templar whitlowwort poleax cylindric okonite guanajuatite sloped +undiffusive cresylite trabecular rainproof mesymnion socioromantic sapphiric diwata biodynamics +skyshine Bulanda obolus reappreciate seelful ribaldrous serphoid +stentorophonic redesertion pelvimetry deepmost absvolt Bassaris diathermacy epauliere +almud Joachimite chordacentrum cocksuredom corbel ungouged Lincolnlike Ghent +Pincian enterostomy incalculable charioteer paradisean subdentate packsack glandularly +squit bicorporeal enhedge Orbitolina hypochondriacism incomprehensible +sesquiquintile propodiale metrocratic temporomastoid parodist beneficent +percent toplike biventer thiodiazole nonutilitarian erythremia balladmonger toxoplasmosis sterilely +piquantness euphemize potentness seelful champer unpatched inferent +amender taurocolla repealableness culm winterproof packsack Ludgatian trip +saguran acocotl ploration Bushongo Sebastian goodwill overcultured benzoperoxide undercolored +hemimelus downthrust rosaniline taver soorkee uncarefully farrantly +imaginary eternal subdentate papery amplexifoliate reappreciate zenick +Babylonism hypochondriacism equiconvex Alethea dermorhynchous manganosiderite serosanguineous marshiness +rehabilitative evictor autobiographist greave unbashfulness seelful unanatomized comprovincial +gymnastic planosubulate bathysphere Hydrangea bettermost socioromantic +noreast overcontribute aneurism various Gilaki prescriber sheepskin seeingness +dishpan mammonish Gothish metoxazine placatory +boor nectopod choralcelo Confervales massedly undecorated centrifugalization +unfeeble phytoma twinling unleavened endotheliomyoma shallowish Ochnaceae dosseret +wherefrom Bassaris rivethead acidophile ticktick bacterioblast returnability Ophiosaurus cretaceous +licitness hoove cretaceous unevoked focaloid +approbation weism Mycogone Cephalodiscus pseudohalogen trailmaking unaccessible +abthainry depthwise rehabilitative umbellic strammel +inertly saccharogenic antihero uncombable outhue unachievable whitlowwort epididymitis unrepealably +Scanic canicule gallybeggar cuproiodargyrite sloped metastoma bucketer pendulant +visceral overwoven aquiline sedentariness imprescribable +volcano mendacity by reciprocation ventricous edificator subirrigate plerome incomprehensible +venialness ovopyriform Coniferae concretion unexplicit undinted trillion Homoiousian +cockal scotale frontoorbital massedly halloo goladar ungrave octogynous +prepavement uncompromisingness scyphostoma crystallographical arrowworm topsail erythrodextrin spookdom propodiale +hysterolysis absvolt totaquina subsequentially 
groundneedle +incalculable stiffish flatman Spencerism prolificy diopside reappreciate adatom stachyuraceous +ungreat mericarp ungouged stentorophonic pondside friarhood unpatched +wingable timbermonger merciful subdentate commotion comparability silverhead omega benzothiofuran +experientialist Whilkut stroking sturdied trillion antalgol glacierist becomma ladhood +acidophile limpet veterinarian shellworker eurythermal glandularly +ordinant ultrasystematic scabbardless Kenipsim unaccessible nonprofession +pondside scrat ornithodelphous benthonic nonutilitarian haply semantician +Oryzorictinae unscourged allegedly angiolymphoma autobiographist +hypoplastral manganosiderite perfunctory cuproiodargyrite infrastapedial atlantite piquantness edificator +Pyrales lienteria reformatory saponaceous deaf hepatorrhaphy Yannigan mustafina +lammy timbermonger byroad scotale tickleproof Isokontae pachydermous +gemmeous Mesua naprapath liberatress chacona quadrennial monogoneutic raphis +topsail detractive throbless physiologian jirble apopenptic preaffiliate boser +counteralliance serpentinic reeveland templar metrocratic oratorize whittle karyological aprosopia +Edo hysterogen redescend sandbox expiscate spiranthic seizing archistome undiffusive +constitutor clanned slangy sialadenitis iniquitously omniscribent biodynamics Animalivora +scrubbed yote biodynamics pansophism approbation +poleax coracomandibular airfreighter undinted Whilkut brutism embryotic +wingable lifter decidable antideflation quarried docimastical goladar boor Animalivora +codisjunct havoc culm sirrah angiolymphoma Pithecolobium genii umangite +times phytonic coldfinch epidymides meloplasty culm sedentariness +shibuichi counterappellant Caphtor lithograph unbashfulness +misthread intrabred tartrous wemless unchatteled +glossing arduousness prefatorial disilane reciprocation +relaster monogoneutic impressor decidable Christianopaganism posttraumatic Scorpaenidae hysterolysis cromlech +phytonic becomma downthrust ungouged supermarket reappreciate +mericarp unpremonished archididascalian interruptor glyphography Itea +approbation Cimmerianism Haversian Kenipsim tetchy theologal +allectory palaeotheriodont valvula noncrystallized veterinarian symbiogenetically +sportswomanship sapphiric bozal angiolymphoma Gothish limpet figureheadship larklike Whilkut +Glecoma calycular slangy thermanesthesia hoove dialoguer fetlocked parquet uncontradictableness +Eryon golem erythrodextrin kerykeion predisputant +enation Semecarpus beadroll mendacity danseuse +foursquare pentafid lienteria testa ethnocracy overstudiousness eristically +Kenipsim equiconvex biventer aneurism cacuminal focaloid immatchable unisexuality +unfurbelowed Gilaki paunchy Machiavel Orbitolina carposporangial +supraoesophageal unchatteled Confervales helpless emir daytime bucketer orthopedical metapolitics +subsequentially swoony roughcast Eleusinian migrainoid Semecarpus +tetchy cartful topline culm metrocratic redescend classificational ten planispheric +disilane archesporial guitarist agglomeratic pony Kenipsim nonuple underskin +canicule unpeople giantly analgic perfunctory perfunctory goodwill unburnt +dithery ladhood jirble uloid knob +nonmanufacture laurinoxylon paleornithology metastoma nonutilitarian sombreroed undiffusive +canicule cockal debromination meriquinoidal centrifugalization +laubanite totaquina danseuse mutter prefatorial bonze ultraobscure unscourged unpredict +transcortical naught paranephros yeelaman inferent sequacity commotion porriginous rave +reperuse 
Dictograph venialness marten alveolite scapuloradial unisexuality Socraticism naught +doina genii agglomeratic hellbender Pincian periarthritis +massedly coracomandibular saccharogenic nonpoisonous Munnopsidae aprosopia mammonish +absvolt circular potentness daytime oversand stachyuraceous seditious euphemious +Bishareen mangonism hypoid whittle tetrahedral monander warriorwise +Megaluridae biodynamics times mediateness Alethea +scrubbed Megaluridae kerykeion archididascalian participatingly +frontoorbital Swaziland ultratense Pithecolobium bromate rehabilitative hysterogen preaffiliate trip +planosubulate octogynous stiffish japanned tartrous docimastical phytoma Vichyite +sheepskin abscission Socraticism Bushongo hypoid superindifference circumzenithal diminutively unswanlike +unschematized oversand patroller cresylite larklike +classificational reconciliable monogoneutic rede scapuloradial diplomatize nonexecutive +ambitus sialadenitis unisexuality admissory unswanlike unrepealably Vaishnavism +epidymides Saponaria thermochemically overinstruct catabaptist cresylite dastardliness uvanite +pompiloid pamphlet ribaldrous afterpressure Gilaki undinted unpredict unachievable uninductive +verbid misthread ten Ghent Bermudian nonuple +unexplicit glossing Mycogone Ochnaceae quadrennial Triconodonta basto papery diurnalness +centrifugalization nummi becomma Triphora unevoked +parabolicness proauction characinoid componental angiolymphoma +Hysterocarpus Megaluridae autoschediastical stapedius penult japanned bacillite misthread bespin +coracomandibular retinize Auriculariales uncarefully hypochondriacism laurinoxylon groundneedle +yawler underogating admissory diopside papery Pishquow refective +chronographic subangulated eternal deaf papery boor predebit speckedness metoxazine +sirrah Bulanda unimmortal relaster metapolitics nonuple tingly chilblain +preoral bettermost monogoneutic angina unisexuality pompiloid rede Swaziland +helpless imaginary hemimelus frictionlessly laubanite Triconodonta +sapience subangulated impairment ipomoein hypochondriacism spot retinize +prepavement bathysphere triakistetrahedral Scorpaenidae lebensraum coadvice depravity Machiavel oinomancy +diminutively macropterous lithograph balanocele pondside +Aplacentalia appetible hoove aprosopia Alethea +monilioid reformatory Cephalodiscus depthwise carposporangial cylindric Consolamentum hypoid circumzenithal +epauliere approbation parastas concretion appetible retinize Yannigan extraorganismal sud +chordacentrum cretaceous prospectiveness codisjunct deepmost ethmopalatal naught +uncompromisingly farrantly transcortical slangy propheticism +sural pterostigma afterpressure halloo thermochemically Joachimite acidophile +acocotl macropterous massedly inertly arrowworm parquet phallaceous +mediateness knob unlapsing pyxie triradiated photoelasticity adscendent abscission +merciful piquantness Gothish subdrainage widdle Alethea seminonflammable +avengeful elemicin ramosopalmate familist chorograph +Itea feasibleness stronghearted scrubbed sleigher +arduousness swoony paleornithology meloplasty repealableness undercolored Vaishnavism frontoorbital lithograph +quarried magnificently hypoplastral swacking nummi gallybeggar ultraobscure Gothish +starosta Hydrangea Itea venialness Triconodonta craglike calycular +byroad subangulated jharal mechanist valvulotomy farrantly +masa Pithecolobium boser sheepskin Shiah docimastical +schoolmasterism scabbiness phlogisticate goodwill uninterpleaded piquantness +arrendation aurothiosulphuric 
balladmonger Mormyrus bismuthiferous parmelioid Saponaria +reeveland acidophile quintette Jerusalem ascitic ungrave predebit Arneb unschematized +Bishareen pompiloid mesymnion precostal chalcites +ineunt Sebastian chalcites quad saguran apopenptic gunshop +experientialist oxyterpene Sphenodontidae aquiline deepmost prospectiveness familist subfoliar impressor +Endomycetaceae selectivity Ochnaceae sud obolus bunghole misthread penult +Confervales circular ticktick giantly interruptedness apocalypst +neuromimesis verbid noreast spherulitic twinling +winterproof pentagamist overwoven crystallographical unexplicit edificator Aplacentalia +perfunctory undeterring seditious seditious stentorophonic +nigh archididascalian unfeeble Filipendula impugnation hysterolysis expiscate +avengeful Quakerishly bromic Bushongo Filipendula edificator hondo Socraticism okonite +mangonism trisilicic uninterpleaded prepavement Hu gemmeous coracomandibular +unprovided silicize Megaluridae benzoperoxide cartful Alethea dinical sterilely +euphemize sheepskin tum ventricous frictionlessly gala pansophism +overcultured embryotic sandbox ineunt outguess propodiale +overinstruct selectivity beadroll sangaree valvula balanocele +horsefly pachydermatoid porriginous pentosuria biventer deindividualization fallacious outhue +manilla Semecarpus thermanesthesia eucalypteol dermorhynchous rosaniline noncrystallized peptonate +packsack parmelioid temporomastoid morphiomania sonable hypoplastral rebilling havoc subtransverse +Protestantize danseuse quailberry omniscribent adscendent amplexifoliate +farrantly sonable lophotrichic uniarticular almud collegian +omniscribent transude stradametrical provedore idiotize hysterolysis stereotypography +ventricous harr spiciferous Whilkut phoenicochroite phoenicochroite foursquare Protestantize drome +danseuse tickleproof merciful propodiale Muscicapa trunnel +redecrease pentosuria digitule Helvidian pope +strammel impairment paranephros counteralliance commandingness untongued lineamental subdrainage +acidophile byroad Whilkut triakistetrahedral skyshine +tristich diplomatize cumbrousness chronographic kerykeion stachyuraceous digitule hemimelus +involatile componental counteractively mustafina laryngic lampyrine Russifier +rehabilitative periclitation feasibleness subangulated noncrystallized +figureheadship supermarket Pishquow preparative Dadaism flutist unanatomized Sphenodontidae +astucious astucious vitally refasten Endomycetaceae starosta nonutilitarian +experientialist angiopathy metastoma posterishness oflete imaginary unforkedness +quintette moodishness enterostomy Aplacentalia trailmaking impressor +nonlustrous playfellowship bubble atlantite Bishareen adz throbless thiodiazole focaloid +pachydermatoid abusiveness vinny Ophiosaurus ornithodelphous deepmost +Edo elastivity bucketer abusiveness excerpt +subirrigate goodwill botchedly thiodiazole sural sonable metapolitics +Machiavel bugre electrotechnics folious Saponaria sapience zoonitic ultraobscure temporomastoid +jharal pleurotropous horsefly seizing porencephalous Scanic Homoiousian chargeably +amender thermoresistant subfebrile potentness ticktick Scanic +predisputant carposporangial fallacious thermoresistant semiangle +unswanlike macropterous bromic Alethea undecorated Harpa arval hoove mustafina +various seizing Semecarpus temporomastoid unfulminated Animalivora tautness laurinoxylon becomma +metapolitics parquet rosaniline Bishareen Lentibulariaceae unforkedness coadvice vesperal phytoma +Jerusalem flippantness 
disilane Ludgatian gul veterinarian sapience putative +starer Gothish leucophoenicite metastoma packsack ultratense ploration anta widdle +diathermacy nectopod ribaldrous cyanoguanidine uninhabitedness idiotize patroller +starosta arsenide craglike hellbender tomorrowness Mycogone kenno beneficent +unpredict anta nonexecutive tautness Megaluridae subdentate bozal mutter +limpet imprescribable apocalypst lammy bought +reformatory Inger allegedly proacquittal emir parabolicness Filipendula pendulant +redescend autoschediastical cumbrousness seeingness Dictograph +seminonflammable absvolt migrainoid monander abthainry approbation +pachydermatoid antiscolic parmelioid Llandovery inertly approbation lithograph +orgiastic pachydermous tomorrowness unlapsing circumzenithal abstractionism propheticism seminonflammable +experientialist magnetooptics cumbrousness trailmaking sandbox clanned kerykeion mutter +Cercosporella periarthritis morphiomania yote metopon edificator balanocele projecting Pincian +mastication arsenide phoenicochroite balladmonger amender pleurotropous +rizzomed prescriber impressor triakistetrahedral idiotize unsupercilious unrevolting +misthread Lincolnlike undercolored pamphlet thorite +euphemize pomiferous sawdust palaeotheriodont goodwill +hogmace veterinarian zoonitic coldfinch dialoguer heliocentricism blightbird +ploration yawler abscission preparative antiadiaphorist parabolicness unpremonished mendacity +gelatinousness hyocholic rainproof unchatteled terrificness +octogynous drome Semecarpus monander propodiale peristeropode mechanist hysterogen bonze +carposporangial Auriculariales meloplasty crystallographical unrealize ventricous hogmace +champer tantivy calycular superindifference apopenptic glandularly cornberry paunchy theologal +mutter lineamental silverhead refasten orchiocatabasis +aprosopia asparaginic dishpan angiopathy monstership magnetooptics Triphora involatile devilwise +lithograph elemicin various daytime moodishness devilwise +impairment Bertat chasmy eer depthwise Mycogone scrat macropterous orgiastic +haply transude bugre planispheric retinize flutist leucophoenicite +warlike abscission cinque subdrainage calabazilla +umangite pentafid michigan pentosuria rede Sphenodontidae scapuloradial patroller potentness +ungouged trunnel sheepskin ungrave quarried japanned edificator symbiogenetically Hydrangea +calabazilla folious Eryon metapolitics affaite Llandovery +preagitate zenick oxyterpene antalgol trisilicic +downthrust laurinoxylon characinoid cheesecutter sombreroed +socioromantic bugre rehabilitative Coniferae sarcologist Machiavel +verbid elastivity cocksuredom pyrocatechol commotion appetible slangy strander +intrabred sheepskin mesophyte ineunt playfellowship vitally +cubby phytoma besagne Itea wherefrom wherefrom +depthwise Isokontae arrendation liberatress nonutilitarian playfellowship +timbermonger commotion expiscate untongued ascitic Ophiosaurus unaccessible phoenicochroite +moodishness mericarp galbulus euphonym diathermacy +slait abstractionism Itea incomprehensible transcortical yote corbel +unanatomized undangered Munychian volcano Helvidian unlapsing ornithodelphous tristich ineunt +omniscribent pentagamist mutter hellbender Munnopsidae anta ascitic +sequacity tantivy biodynamics commotion Llandovery Llandovery chacona devilwise +transudatory bozal photoelasticity angiopathy trailmaking placatory angiopathy friarhood +aprosopia meloplasty jajman refective shibuichi anta cumbrousness +sequestrum hypoplastral angiolymphoma boor 
chorograph poleax trailmaking +hellbender decidable speckedness trabecular frenal proboscidiform Jerusalem reformatory moodishness +mesymnion eer crystallographical porriginous amender wherefrom eulogization trillion lyrebird +archesporial affaite interruptor sirrah Eleusinian +unsupercilious umangite Dodecatheon aspersor stewardship mesophyte basto cornberry +Macraucheniidae comprovincial seeingness trisilicic constitutor swangy +Aktistetae morphiomania peristeropode unfurbelowed gemmeous splenauxe orchiocatabasis +overcontribute tendomucoid unharmed Mormyrus archistome Harpa jirble transude osteopaedion +infestation sviatonosite experientialist totaquina trip Inger suspend +Fameuse subsequentially absvolt floatability uncarefully jajman Russifier Eleusinian +magnetooptics patroller signifier groundneedle nativeness Ludgatian tetchy swangy sonable +yeelaman swacking imprescribable terrificness guitarist Hydrangea shibuichi uninhabitedness ordinant +flutist reciprocation Glecoma misthread misthread +meloplasty tambo serpentinic planispheric Cimmerianism +masa returnability reappreciate culm mammonish +weism Itea mammonish transudatory lithograph involatile edificator ultratense +larklike sequestrum minniebush sturdied Lentibulariaceae moodishness +chordacentrum deepmost Dadaism eer subdentate overbuilt greave stroking +unreprimanded mesymnion prescriptible approbation avengeful +ultratense ventricous Lemuridae pyroacetic helpless arteriasis steprelationship +uncompromisingly calabazilla visceral antineuritic commotion semantician tetchy +Saponaria cretaceous diatomaceous Cercosporella Fouquieria outhue +laurinoxylon umangite pendulant nonprofession ornithodelphous liquidity aspersor +hyocholic starosta tickleproof predebit adz pinulus interruptedness +corelysis okonite widdle outhue wherefrom oflete Helvidian +plerome mediateness charioteer scapuloradial vitally +intuition erlking cheesecutter pterostigma spermaphyte +brutism sloped sertularian guanajuatite circumzenithal boser synovial decardinalize astucious +oinomancy pachydermous paunchy countergabion cretaceous cinque antalgol +canicule overcultured pentosuria sterilely jirble tomorn arteriasis balladmonger doubtingness +bismuthiferous comism supraoesophageal unlapsing sleigher +hypochondriacism eucalypteol Machiavel warlike pterostigma spot Consolamentum +epididymitis magnetooptics cyanoguanidine porencephalous choralcelo unpredict seeingness oversand +reperuse preoral corelysis sandbox cubby hypoid pansophism supraoesophageal rehabilitative +interruptor sialadenitis homeotypical aurothiosulphuric homeotypical Haversian boser +chargeably docimastical Filipendula subtransverse karyological unfeeble laubanite tomorn Joachimite +templar golem seelful classificational cervisial benthonic arduousness insatiately becomma +warlike toxihaemia preoral iniquitously unanatomized +bladderwort planispheric Prosobranchiata splenauxe hymnic flutist unprovided centrifugalization +unevoked omega participatingly inductivity quintette dialoguer metoxazine Ludgatian participatingly +greave quailberry blightbird tendomucoid golem technopsychology +pumpkinification hemimelus gemmeous monstership mangonism sertularian bespin ambitus haply +signifier totaquina subfebrile reformatory sviatonosite +preaffiliate marshiness inventurous chronographic pumpkinification halloo +uloid Haversian sandbox choralcelo mechanist +eer octogynous larklike Serrifera triradiated +rotular pleasurehood widdle pentagamist bubble kerykeion intuition sud +agglomeratic bathysphere 
decardinalize cylindric impugnation +coadvice unstipulated seraphism Prosobranchiata sialadenitis sleigher +uncombable haply ultratense daytime mesymnion bespin +hypoid proauction ununiformly pope warriorwise +cocksuredom idiotize penult toxoplasmosis qualminess synovial pomiferous beadroll +chooser undiffusive precostal trabecular groundneedle upcushion obolus epididymitis +counterappellant beadroll airfreighter sonable cyanophilous unexplicit Jerusalem ovopyriform +various imperceptivity unsupercilious phytoma Isokontae Chiasmodontidae +folious Tsonecan inexistency chorograph myesthesia Protestantize seelful +uncompromisingness upcushion monogoneutic mechanist chargeably Bermudian ventricous gunshop +autoschediastical hypochondriacism counteractively okonite euphonym +predisputant overcrown propodiale brag cuproiodargyrite balanocele ethnocracy +hepatorrhaphy arval quarried golem Pyrales impugnation havoc +inertly Shiah embryotic Quakerishly alveolite Jerusalem Mesua anta +upcushion lithotresis balladmonger Spencerism Helvidian +serpentinic scrubbed neurodegenerative serpentinic Whilkut sloped Harpa +kerykeion periarthritis liberatress verbid analgic bogydom lithograph impairment acocotl +Lincolnlike suspend iniquitously Tamil pneumatotherapy dialoguer hogmace reappreciate +snare allegedly antiadiaphorist uncompromisingly oxyterpene +temporomastoid epauliere manny botchedly Pincian +archistome epididymitis stapedius abstractionism guitarist +brag gala pyroacetic phlogisticate Endomycetaceae moodishness nonexecutive plugger +undiffusive fossilism incomprehensible autobiographist papery nonexecutive Savitar +pyroacetic pleurotropous nigh Animalivora dermorhynchous +byroad tetrahedral selectivity sviatonosite sud okonite +Sphenodontidae eer semantician minniebush Scorpaenidae sarcologist +Munnopsidae gymnastic trillium unfurbelowed Llandovery +starosta wandoo Triphora unprovided eternal roughcast aprosopia experientialist barkometer +vinegarish Florissant omniscribent arteriasis macropterous +Aktistetae toplike euphemious thermoresistant glandularly taver bucketer +orgiastic overstaid ornithodelphous umbellic yeat trisilicic dinical gelatinousness +strander Hu reappreciate undangered bogydom naught +quadrennial sapience ultratense abstractionism figureheadship +balladmonger pachydermatoid biopsic steprelationship jajman sangaree goladar +plugger cockal eulogization lammy valvula Lincolnlike halloo +retinize Christianopaganism spiciferous topsail astucious +immatchable exprobratory oflete plerome chasmy Pishquow Itea uncombable genii +trip Effie paradisean spiciferous orgiastic byroad chooser +Hydrangea arrowworm gelatinousness chooser endotheliomyoma +centrifugalization guanajuatite masa temporomastoid waird Passiflorales +unsupercilious Swaziland trabecular pamphlet haply +triakistetrahedral smokefarthings bestill diwata tetchy oratorize zenick +trillion penult erythrodextrin stereotypography papery Italical commotion +tum overstaid dunkadoo ferrogoslarite biventer reperuse oflete +waird poleax pyrocatechol rebilling ticktick bozal archistome corona nonlustrous +seizing angiolymphoma photoelasticity pseudoxanthine pachydermatoid dispermy imperceptivity +cockstone interruptedness hyocholic magnetooptics Filipendula +unpeople nummi amplexifoliate Helvidian adz laubanite +airfreighter oratorship bunghole manny temporomastoid +admissory silverhead proboscidiform homeotypical waird marten antineuritic scapuloradial +michigan subfebrile Pincian splenauxe avengeful seeingness nonutilitarian 
+mediateness pachydermatoid insatiately overcrown corbel spermaphyte +Yannigan pompiloid stiffish spot expiscate +sapphiric tartrous flippantness idiotize pony enation +misthread cresylite analgize bladderwort balladmonger metrocratic +paunchy karyological countergabion gorilloid intrabred unpremonished theologicopolitical pachydermous +feasibleness Mycogone volcano glandularly neurodegenerative pyroacetic arteriasis +epidymides Mesua sawdust goodwill metapolitics Italical glyphography octogynous Italical +suspend hepatorrhaphy gorilloid oversand aprosopia Bassaris +quadrennial amylogenesis thorite antalgol Hydrangea bicorporeal phytoma unswanlike unstressedly +diurnalness orchiocatabasis oxyterpene drome bogydom zenick silicize +inexistency dialoguer propheticism visceral suspend +catabaptist dermorhynchous ipomoein physiologian impairment +Fameuse twinling clanned oxyterpene overcontribute unforkedness detractive danseuse +overcultured becomma timbermonger unanatomized aconitine Ophiosaurus +uninterpleaded characinoid overstudiousness ladhood inventurous +Sebastian japanned arrowworm penult frontoorbital oflete +metastoma tailoress archididascalian sud brag +bonze dishpan intuition ultrasystematic steprelationship vinegarish acidophile spiranthic +Spencerism ungreat smokefarthings inductivity apopenptic osteopaedion mustafina mechanist prescriber +unpredict mustafina overcontribute swangy oinomancy zanyism circumzenithal +neurodegenerative glandularly phlogisticate sleigher mendacity phallaceous experientialist cacuminal +characinoid acocotl chalcites sheepskin Pithecolobium enhedge macropterous Harpa imperceptivity +intrabred cumbrousness antideflation winterproof aprosopia marshiness pleurotropous unexplicit +cockstone rosaniline mustafina Triconodonta physiologian unreprimanded subdrainage +appetible autoschediastical mediateness glaumrie taurocolla naprapath foursquare myesthesia feasibleness +critically pictorially skyshine pyroacetic hoove pachydermous homeotypical bestill +subdentate appetible supraoesophageal admissory Helvidian mustafina swacking +metaphonical Aplacentalia countergabion unefficient suspend serpentinic sloped vinny Protestantize +approbation sud glandularly stradametrical pneumatotherapy sequestrum comprovincial interfraternal +times archesporial psychofugal interfraternal uncontradictableness reeveland untongued leucophoenicite +okonite valvulotomy twinling speckedness shibuichi basto iniquitously overinstruct cervisial +strander parodist euphemious alveolite excerpt fallacious underogating +testa endotheliomyoma scyphostoma interruptor uncompromisingness +porriginous nonlustrous noncrystallized porriginous asparaginic Dodecatheon Fameuse tautness Passiflorales +eucalypteol uninhabitedness lienteria isopelletierin benzoperoxide thiodiazole quarried rainproof uncompromisingly +tetragynian lienteria pompiloid bettermost deaf lammy tetragynian planosubulate +ticktick unrealize Hydrangea Aplacentalia overstudiousness coldfinch seminonflammable sterilely ovopyriform +frenal ticktick autobiographist participatingly pope suspend taver immatchable outwealth +snare trabecular archesporial uniarticular Florissant archesporial uncompromisingly pelvimetry spherulitic +enhedge Triphora debellator depthwise rotular predebit calabazilla electrotechnics +parmelioid posterishness yawler prescriptible diminutively synovial +Triphora stentorophonic ell homeotypical pony +emir laubanite consumptional amender tetragynian +transude omega inexistency electrotechnics benzoperoxide 
+chordacentrum Dawsonia Uraniidae pictorially orthopedical +subsequentially carposporangial jharal antihero bacterioblast nonsuppressed +trip spot trophonema uncombable inventurous tonsure nebular Saponaria proauction +Gilaki epididymitis Munnopsidae cloy coldfinch nebular +prescriber roughcast unforkedness subtransverse Ludgatian uncontradictableness molossic +unanatomized Spatangoidea oxyterpene autobiographist paleornithology +Tamil chordacentrum abusiveness overstudiousness comprovincial +metastoma scyphostoma pachydermatoid unstipulated sonable +unrepealably sequestrum sturdied Hydrangea outguess starosta cyanophilous +chronist archididascalian dastardliness winterproof nonexecutive wherefrom rivethead biventer +floatability zenick reciprocation pterostigma trailmaking frameable ineunt allegedly +goladar dehairer Ochnaceae seizing dehairer silicize +columniform chalcites transcorporeal intrabred silicize Glecoma venialness arrendation seizing +stewardship Hydrangea nigh ineunt lyrebird unrepealably ultrasystematic +lithotresis euphemize redecrease redescend Pishquow frameable +subfoliar ovoviviparous Quakerishly undercolored lophotrichic Uraniidae micromembrane Ghent uninductive +lifter uncontradictableness Kenipsim aquiline macropterous laurinoxylon naprapath +sandbox by euphemize Inger Tamil ventricous macropterous +bozal bacterioblast eucalypteol avengeful orthopedical flippantness +antideflation haply Fouquieria vinny uncombable overcultured +prepavement cromlech epidymides ungouged golem winterproof +lithograph merciful debromination massedly bestill +Florissant stachyuraceous scrat goodwill paleornithology depressingly Fouquieria +subangulated slangy flushgate enation sheepskin +ungrave danseuse uncompromisingly balanocele Alethea Filipendula undiffusive +spherulitic mechanist subfoliar overstudiousness unrepealably subdrainage +afterpressure reappreciate glossing omniscribent overwoven +quadrennial stroking agglomeratic counteractively ultrasystematic evictor scabbiness +omniscribent steprelationship blightbird scrat intuition cromlech stentorophonic topline ununiformly +chargeably masa Bassaris hypoid hymnic +cockal impairment rehabilitative regardful clanned +ultratense saguran trillium ploration chronographic +seminonflammable flatman venialness slait massedly dermorhynchous nonmanufacture +experientialist crystallographical preparative farrantly introducer +noreast pentafid transcortical triakistetrahedral hypochondriacism +propheticism Ludgatian tendomucoid misexposition archistome +havoc widdle botchedly rede hemimelus scabbardless pleasurehood obolus Macraucheniidae +lampyrine imperceptivity jajman Harpa meriquinoidal +Eryon undiffusive cubby critically phallaceous stentorophonic +Cercosporella flutist masa lebensraum Mormyrus stewardship elemicin unanatomized amender +tramplike rainproof guitarist Homoiousian counterappellant zenick centrifugalization +nonmanufacture craglike octogynous bladderwort unstipulated rosaniline +parabolicness starer ultratense greave temporomastoid mericarp manganosiderite epidymides skyshine +idiotize nummi proacquittal marten whittle flushgate speckedness +metopon lampyrine dermorhynchous Ophiosaurus Triphora chronist wingable sural +throbless edificator Filipendula visceral nonsuppressed scrubbed componental sawdust +cockstone pterostigma temporomastoid pyroacetic reciprocation mediateness decidable +obispo allotropic eurythermal greave comprovincial plerome cervisial phytonic trip +idiotize stapedius kerykeion daytime stereotypography 
+unharmed ramosopalmate by porencephalous Sphenodontidae +diopside debromination uninterpleaded manganosiderite unimmortal elastivity seminonflammable haply +Tsonecan transcortical sequacity autobiographist parabolicness aprosopia +sleigher nonutilitarian taver serphoid unstressedly +triakistetrahedral isopelletierin peptonate heliocentricism affaite +glandularly Cephalodiscus epididymitis trip astucious Passiflorales biopsic pyrocatechol +eulogization Pithecolobium taurocolla preoral Bertat +beadroll regardful stewardship brooky engrain Bertat terrestrially +dehairer familist waird interruptor Homoiousian toxihaemia sud Zuludom Serrifera +umangite jirble aspersor friarhood adz undangered silicize cockal +bucketer dunkadoo cubit oxyterpene subfoliar lifter weism drome +admissory sapphiric retinize Orbitolina subsequentially +corelysis Vaishnavism percent fallacious uloid nonlustrous +Lemuridae stronghearted mericarp proacquittal wingable laubanite homotransplant +triakistetrahedral waird triakistetrahedral transude triradiated +stapedius angina cyanoguanidine neurotrophic isopelletierin yawler +unlapsing zenick avengeful scyphostoma rotular diurnalness +subsequentially euphemize pelvimetry calabazilla rainproof leucophoenicite unswanlike +aneurism periclitation redescend scotching Gothish inferent rainproof +groundneedle macropterous subdentate Dictograph feasibleness +Chiasmodontidae piquantness manilla scotale jajman +chooser lithograph erlking Itea cresylite uncompromisingness sangaree +photoelasticity constitutor Bishareen heavenful Munnopsidae monogoneutic knob +erythremia subtransverse trailmaking Ludgatian unfulminated +Semecarpus chacona manganosiderite Macraucheniidae Babylonism classificational metaphonical selectivity +placatory subsequentially retinize sequestrum monstership uncompromisingness +seelful zanyism diminutively hoove Bermudian boor doina semiangle +monstership bot Socraticism Hydrangea noncrystallized obispo antideflation angiopathy deindividualization +reperuse porencephalous deaf comprovincial comism pictorially +hackneyed circumzenithal rainproof concretion whitlowwort orthopedical +canicule diplomatize glaumrie sleigher furacious genii +nonpoisonous stiffish perculsive ineunt monogoneutic Sphenodontidae bogydom unsupercilious +biventer metapolitics outguess misthread overcontribute pyrocatechol micromembrane +ramosopalmate overwoven louse antineuritic Bassaris Dadaism pentafid sportswomanship hellbender +rizzomed hepatorrhaphy Orbitolina retinize outwealth tum +Munychian cacuminal blightbird roughcast feasibleness pachydermous +frontoorbital unanatomized spherulitic lampyrine harr vitally +byroad bacillite overstaid crystallographical unburnt botchedly +dispermy reciprocation enterostomy tramplike inventurous Orbitolina glandularly diurnalness +upswell glandularly spot neurodegenerative uvanite cumbrousness +suspend propheticism chasmy diplomatize cobeliever times epauliere +friarhood Lentibulariaceae reformatory cyanophilous toxoplasmosis Homoiousian omniscribent outguess crystallographical +impairment fossilism peristeropode supermarket Passiflorales quarried friarhood cacuminal +roughcast catabaptist elastivity adscendent toplike phoenicochroite oinomancy +genii unrealize afterpressure technopsychology appetible culm chrysochrous +evictor Pincian Lentibulariaceae Dawsonia unexplicit incomprehensible throbless +theologal snare whitlowwort gemmeous subangulated cattimandoo +terrificness paunchy craglike pompiloid idiotize nonlustrous downthrust +Chiasmodontidae 
+[~900 added lines of random dictionary-word filler: test-resource file content (word-list test data with no reviewable structure), retained in the patch verbatim]
involatile +figureheadship louse unleavened inventurous undecorated paranephros Glecoma ventricous discipular +interruptor infrastapedial autobiographist cloy figureheadship +Babylonism bozal widdle lienteria hepatorrhaphy photoelasticity Uraniidae pyrocatechol +Ochnaceae beatable orchiocatabasis farrantly transcortical +preparative unfurbelowed magnificently times gelatinousness hypoid +rechar posterishness trabecular nonuple Vichyite licitness taurocolla +proacquittal undercolored counteractively Glecoma sturdied heliocentricism Gilaki +thiodiazole subofficer chasmy hogmace ethnocracy swoony +aconitine cockal countergabion ordinant sturdied electrotechnics euphonym +unisexuality laurinoxylon larklike cumbrousness angiopathy phoenicochroite Dawsonia dinical +misexposition rizzomed subdentate Bertat parabolicness +lifter tantivy Eryon seelful golem fetlocked mangonism impairment +allotropic angiolymphoma overcontribute dehairer temporomastoid charioteer phoenicochroite twinling +theologal weism porencephalous hypoplastral Eleusinian +sviatonosite toxoplasmosis enation bogydom homotransplant ell quad +quarried Haversian theologal uncarefully aspersor calabazilla +metastoma fetlocked sangaree cocksuredom shibuichi paleornithology transude +abthainry valvulotomy uninterpleaded unrevolting hypoplastral dispermy rede +aprosopia antiadiaphorist endotheliomyoma rehabilitative aurothiosulphuric sheepskin +bespin authorling Spencerism figured erythrodextrin Scorpaenidae benzoperoxide +adatom dishpan winterproof unharmed pyxie hymnic osteopaedion codisjunct chrysochrous +intrabred tetragynian noreast phlogisticate lophotrichic spiranthic Russifier interfraternal +Bertat Fouquieria Aktistetae angiolymphoma byroad anta balanocele havoc pictorially +shellworker saponaceous thorite reeveland undangered Scorpaenidae preaffiliate oratorize +Fameuse timbermonger dipsomaniacal tendomucoid corona +osteopaedion sterilely Ochnaceae Bishareen socioromantic frontoorbital imaginary monstership +mediateness ticktick Dodecatheon infestation masa brutism unurban trillion ornithodelphous +balladmonger Pyrales symbiogenetically ovopyriform collegian preaffiliate +scotching bunghole psychofugal Yannigan pondside abusiveness Socraticism +scabbiness valvula louse proboscidiform Consolamentum planispheric subdrainage euphonym Consolamentum +uniarticular impugnation participatingly inductivity archesporial lithograph Ludgatian frictionlessly corbel +antiabolitionist noncrystallized orgiastic nigh speckedness +uncombable lineamental unrevolting antalgol imperceptivity endotheliomyoma transude selectivity +marten Bishareen stradametrical sequestrum cornberry pseudoxanthine redesertion insatiately rainproof +Bertat pleurotropous pelf emir waird Mycogone coldfinch thermanesthesia bot +arduousness minniebush Llandovery redecrease moodishness Gothish +unpredict counteralliance archididascalian brag Vichyite meloplasty Orbitolina figureheadship entame +abstractionism asparaginic ungrave hypoplastral larklike preaffiliate +unfurbelowed unharmed dipsomaniacal beatable seizing unforkedness quad tetragynian +excerpt Spencerism leucophoenicite imprescribable Christianopaganism angina +euphemize diminutively reperuse chasmy Dadaism sangaree upswell sterilely cyanophilous +guitarist daytime subfoliar intuition galbulus valvula tailoress +benzothiofuran unchatteled unisexuality stewardship unstipulated unstressedly +Zuludom uncombable Kenipsim heliocentricism perfunctory constitutor arrowworm gallybeggar +inferent figureheadship craglike 
toxihaemia dosseret +equiconvex serphoid helminthagogic prefatorial antalgol adatom moodishness mastication +tetrahedral euphonym pony Hester Bermudian +manny carposporangial templar jajman Inger twinling ornithodelphous quadrennial +noreast pumpkinification Llandovery Edo overcontribute +subdrainage ticktick palaeotheriodont refective oratorship +trillion Ludgatian times metaphrastical ladhood impressor oversand sapience +trunnel dastardliness harr chilblain obolus Hydrangea photoelasticity +homotransplant angiolymphoma Savitar silicize prescriber +overcrown heavenful metrocratic umbellic various Haversian +yote saccharogenic cubby arrowworm sangaree tetchy pope debellator +Babylonism angiopathy metrocratic dastardliness percent provedore Vichyite Ghent meloplasty +okonite abscission Protestantize undeterring inventurous affaite +admissory topline potentness arrendation guanajuatite thermoresistant +saguran benzothiofuran eulogization pope antihero Florissant autobiographist planispheric +nummi serphoid smokefarthings Cephalodiscus mediateness oversand dehairer +propheticism undangered coracomandibular tambo erythrodextrin Oryzorictinae prezygapophysial +Gothish unharmed toplike electrotechnics omniscribent relaster semiangle +Dunlop lyrebird orchiocatabasis rizzomed galbulus unfeeble yawler pleurotropous +vesperal Orbitolina larklike prefatorial sud consumptional metopon spookdom antivenin +cyanophilous interruptedness unswanlike ornithodelphous transude cyanophilous tonsure +scotale calabazilla Spatangoidea Cephalodiscus preagitate unreprimanded alen Vaishnavism +unleavened periarthritis decardinalize hysterolysis uniarticular glandularly genii paranephros +tetchy subdentate untongued rehabilitative prepavement uncontradictableness proboscidiform ploration unrealize +Bushongo trophonema proacquittal Auriculariales ribaldrous +ell Dunlop paranephros nummi Tamil depthwise bubble scabbiness +calycular subangulated ornithodelphous whittle aneurism stormy sombreroed Dadaism Kenipsim +weism porriginous beatable hymnic bestill unurban +laubanite archididascalian Macraucheniidae Whilkut moodishness sesquiquintile serpentinic sturdied predisputant +papery nebular undeterring unobservantness dastardliness bicorporeal comism saponaceous +interfraternal unfurbelowed pondside unlapsing alveolite extraorganismal metopon shellworker +tetchy placatory slait Dawsonia astucious infrastapedial +preparative pansophism unscourged whittle unfeeble unchatteled technopsychology Pithecolobium +ribaldrous discipular Fouquieria unpeople appetible seeingness trillium edificator soorkee +lyrebird unurban doubtingness decidable naprapath +aurothiosulphuric interfraternal underskin mendacity digitule +preparative horsefly nonuple parmelioid Oryzorictinae +lithotresis depressingly outhue Tsonecan underskin paranephros mustafina +noreast octogynous harr synovial atlantite papery glandularly saguran +vinegarish dinical sesquiquintile sedentariness counteractively serpentinic Bertat lithograph ovoviviparous +uncombable corbel ploration preaffiliate enhedge +relaster veterinarian infravaginal Pishquow idiotize by infravaginal abthainry heavenful +psychofugal Aktistetae Consolamentum ribaldrous subdrainage impairment alen +semantician biodynamics prescriber seelful nonuple glacierist scapuloradial gelatinousness +tetragynian Vichyite absvolt toplike Confervales +seeingness psychofugal isopelletierin prefatorial cattimandoo templar sequacity circumzenithal sterilely +thorite intuition oratorize mustafina pelvimetry 
meriquinoidal Shiah cocksuredom +Llandovery returnability ascitic laryngic magnificently chrysochrous +tautness rainproof stapedius Harpa yawler pseudoxanthine columniform +Triphora superindifference Thraupidae acocotl diathermacy cubit expiscate veterinarian ultrasystematic +botchedly stapedius whitlowwort cubit wherefrom airfreighter eer winterproof hysterogen +decidable orgiastic aspersor refasten poleax Cercosporella sheepskin arduousness +ultraobscure tickleproof isopelletierin prescriber valvulotomy +pachydermatoid circumzenithal trisilicic lienteria daytime various Muscicapa knob +ascitic erythremia lophotrichic goladar crystallographical Bermudian verbid Lentibulariaceae +overcrown transcortical physiologian characinoid cylindric retinize monstership +jirble stereotypography nonsuppressed unharmed ethnocracy swearingly chooser +glossing Florissant uvanite pamphlet preparative characinoid aconitine vinny +engrain ticktick uvanite Triphora Dunlop coldfinch oblongly bozal +trophonema triakistetrahedral chacona acocotl absvolt magnificently tramplike +cheesecutter stentorophonic Glecoma depravity nebular Fameuse constitutor preaffiliate tambo +pony inferent charioteer veterinarian unswanlike posterishness antineuritic scrat +Bermudian generalizable oxyterpene speckedness floatability arsenide Quakerishly hepatorrhaphy +mericarp Spatangoidea intrabred elastivity subsequentially ell magnetooptics manilla sural +unswanlike speckedness defensibly Glecoma arval +antiscolic stronghearted pomiferous toplike synovial Edo docimastical analgic blightbird +redesertion ramosopalmate overcrown stereotypography elemicin ambitus +dipsomaniacal magnetooptics rehabilitative theologicopolitical folious bunghole depravity hackneyed +propheticism comprovincial dunkadoo dipsomaniacal tingly +infestation Ophiosaurus overstaid japanned posterishness canicule ribaldrous +Shiah vinegarish cervisial pinulus bucketer trailmaking Spatangoidea overcontribute unurban +seminonflammable okonite lifter bromic physiologian lebensraum uninductive deepmost +unisexuality perculsive trophonema predisputant oflete sural eternal +packsack migrainoid interfraternal quintette Chiasmodontidae Animalivora +pneumonalgia prescriber laryngic coldfinch metastoma +hackneyed taver unreprimanded toplike breadwinner starosta havoc slait +Russifier testa Confervales Scanic porriginous winterproof saccharogenic +Spatangoidea hondo triakistetrahedral ethmopalatal serosanguineous trophonema splenauxe biventer mesophyte +giantly scrubbed Ghent overbuilt epauliere constitutor +tetragynian morphiomania metastoma bubble Dictograph molecule putative idiotize +stapedius intuition exprobratory unrepealably goladar +unstressedly haply parabolicness intrabred metastoma rede manganosiderite verbid +mediateness Homoiousian preagitate ethmopalatal hemimelus phlogisticate blightbird hysterogen +farrantly Passiflorales stewardship paunchy uninhabitedness epidymides brutism phoenicochroite sloped +schoolmasterism prospectiveness aurothiosulphuric acidophile aspersor terrificness unanatomized sonable +overinstruct gorilloid comprovincial Hydrangea placatory +cubby analgize chordacentrum retinize noncrystallized serphoid eulogization lineamental visceral +alen slait Italical terrestrially Chiasmodontidae ungreat +diopside laryngic doubtingness Thraupidae diopside scotale unchatteled sangaree +champer larklike acidophile stentorophonic Cercosporella +astucious palaeotheriodont amender refective knob +relaster scrat nonsuppressed lienteria supermarket 
+endotheliomyoma sirrah rede prescriptible pinulus +sural packsack prescriptible Quakerishly porencephalous ventricous pompiloid isopelletierin tum +valvula reconciliable depressingly phytonic technopsychology unlapsing +helminthagogic starosta kerykeion amylogenesis minniebush bunghole returnability mesymnion Chiasmodontidae +subangulated lienteria decardinalize stereotypography cacuminal +glandularly micromembrane brag nonlustrous orchiocatabasis trophonema amplexifoliate socioromantic oxyterpene +aprosopia pneumonalgia Russifier mesymnion times pompiloid counteractively +amender uncompromisingly psychofugal winterproof manganosiderite +agglomeratic ribaldrous immatchable anta unachievable dithery trisilicic trisilicic +Bishareen entame antihero stiffish Vichyite lienteria waird Lentibulariaceae +quadrennial subofficer constitutor cattimandoo tomorrowness decardinalize Bushongo circumzenithal overinstruct +Dunlop glyphography disilane haply apopenptic +totaquina archesporial epididymitis corbel stereotypography +perculsive unimmortal euphemize nebular figured ramosopalmate havoc +generalizable sud rosaniline Hu tailoress Machiavel misexposition +psychofugal arrendation antihero ungreat tendomucoid pentafid imaginary hellbender nonprofession +eternal comparability archididascalian saccharogenic overstudiousness antihero nonexecutive +trillion diplomatize sviatonosite jajman unpeople reeveland +saguran Mesua lammy michigan unchatteled slipped Lincolnlike Consolamentum +periarthritis karyological generalizable iniquitously orchiocatabasis bromic Aktistetae zanyism +signifier various perculsive skyshine meloplasty wandoo +ventricous chooser euphemize analgic codisjunct leucophoenicite Isokontae figured ovoviviparous +smokefarthings havoc poleax galbulus imaginary paleornithology +boser circular champer rede Bishareen +transude aquiline hoove uninterpleaded brooky unimmortal orthopedical swearingly naught +ethnocracy bogydom besagne goladar topline euphemize deepmost Coniferae +velaric underogating involatile limpet autobiographist +Mycogone corelysis massedly anta abscission +monstership overstaid glyphography Saponaria pyrocatechol +dinical engrain stiffish boser prefatorial +saguran craglike wemless licitness trillium phytoma +analgize cresylite overcontribute pyxie quad Ludgatian nativeness obispo hypoid +harr japanned massedly euphemious epidymides ethmopalatal trillion +lyrebird posterishness commandingness jirble Pyrales Endomycetaceae +coracomandibular propheticism unfulminated pelf cumbrousness sarcologist starosta bespin Pyrales +inexistency Cephalodiscus golem zanyism ungreat toxoplasmosis mediateness japanned +rave depthwise seeingness infrastapedial asparaginic knob umbellic bathysphere +hyocholic supraoesophageal terrificness overinstruct throbless +morphiomania phlogisticate inductivity penult volcano +generalizable bacillite farrantly stroking Fouquieria unstressedly +mutter Bushongo tomorrowness peptonate bismuthiferous +louse subangulated nonpoisonous critically molecule outwealth whitlowwort eternal nonpoisonous +metapolitics commotion endotheliomyoma Arneb choralcelo tautness piquantness jharal impressor +cyanophilous palaeotheriodont endotheliomyoma intuition infrastapedial sural Alethea homotransplant +experientialist hymnic nonmanufacture charioteer zenick unfurbelowed times limpet +expiscate unstressedly overwoven apopenptic cervisial inventurous +absvolt timbermonger metrocratic orthopedical pamphlet louse +socioromantic scotale elastivity molossic angina sawdust 
immatchable +analgize componental vinegarish unrepealably uncarefully brooky neuromimesis neurotrophic penult +shellworker yote ordinant floatability toxoplasmosis Shiah dipsomaniacal +gemmeous Caphtor apocalypst canicule schoolmasterism Arneb stapedius +archistome Inger nonexecutive havoc slangy +triradiated Russifier unrealize cyanoguanidine subofficer furacious metaphonical coadvice dinical +metaphrastical dastardliness provedore lithotresis karyological sarcologist reperuse pseudohalogen +engrain bacillite posterishness homeotypical Bushongo +reperuse pleurotropous strammel Italical dialoguer mesophyte oinomancy +mustafina fossilism prepavement blurredness qualminess posterishness diopside +starer uniarticular balladmonger jharal vitally pentafid cromlech scapuloradial +allectory unbashfulness paranephros manganosiderite overwoven Bishareen squit angiolymphoma Coniferae +reciprocation unpeople unfulminated imperceptivity unleavened afterpressure +astucious preagitate arsenide asparaginic excerpt temporomastoid +outhue bromic subfoliar unschematized Chiasmodontidae wingable lienteria +selectivity ethnocracy preagitate chasmy molossic unpredict idiotize +manny warriorwise taurocolla Mesua unprovided photoelasticity chorograph +rotular doubtingness boser monogoneutic eurythermal unpredict Bishareen +bonze guitarist laurinoxylon overcontribute subofficer nonmanufacture +goodwill returnability electrotechnics rosaniline antalgol admissory aspersor +phoenicochroite planosubulate commotion halloo mericarp +chilblain paleornithology winterproof mammonish Dawsonia undeterring arrendation laryngic parquet +rivethead analgize warriorwise Babylonism yeelaman +unharmed archididascalian trillium drome intuition +sural emir lithograph Megaluridae gul Passiflorales squdge +enation mangonism hymnic Christianopaganism bespin Haversian selectivity diwata abthainry +amender adz elemicin columniform theologal +Joachimite eternal supermarket pamphlet entame ladhood choralcelo +balladmonger depthwise Chiasmodontidae haply insatiately +reformatory pyxie kenno galbulus homeotypical shola downthrust transudatory almud +thiodiazole Protestantize instructiveness unefficient Mormyrus +ovoviviparous flutist metaphonical euphonym metapolitics Spencerism Triconodonta interfraternal +starer bogydom eulogization inventurous vinny defensibly instructiveness lampyrine cacuminal +perculsive plugger ramosopalmate Glecoma plugger templar Sebastian Machiavel +rainproof roughcast homotransplant subirrigate liberatress +Passiflorales wemless preaffiliate cocksuredom spiciferous semantician defensibly Muscicapa +sesquiquintile comprovincial rizzomed arsenide unimmortal intuition ultraobscure Eryon +pyxie theologicopolitical stewardship slait wingable +Whilkut wherefrom becomma leucophoenicite classificational metaphonical +cyanophilous phoenicochroite introducer approbation Kenipsim cyanoguanidine redecrease serosanguineous cacuminal +warlike silicize antalgol outwealth neuromimesis tristich phallaceous +lienteria thermochemically masa Zuludom hypoid abusiveness interruptor unrealize +Ophiosaurus Sebastian feasibleness Pincian goladar merciful prolificy +Gothish depravity peristeropode allotropic arteriasis euphemious umangite aspersor +percent swearingly ramosopalmate brutism papery participatingly +decardinalize retinize saccharogenic Mormyrus plerome stradametrical +heavenful nonpoisonous sawdust Babylonism pope angiopathy +terrestrially glossing critically Megaluridae emir taver dinical meloplasty parquet +dinical 
acidophile mastication epauliere bismuthiferous +Inger sedentariness metrocratic edificator Inger Muscicapa +licitness terrificness provedore Italical sleigher counterappellant sleigher imperceptivity unprovided +velaric tetragynian tristich unsupercilious chasmy unfurbelowed arteriasis +ungreat Lemuridae tambo rechar chalcites +abscission nonpoisonous bubble nonlustrous genii +taver corbel tum meriquinoidal bromic +frenal widdle prepavement steprelationship alveolite disilane +instructiveness stewardship uncombable gelatinousness throbless templar +expiscate arrowworm smokefarthings metastoma unbashfulness involatile pony sapphiric cobeliever +tingly wemless starosta misthread signifier +interruptedness unburnt horsefly Florissant supraoesophageal coadvice critically stewardship Hysterocarpus +nebular orgiastic interfraternal brag prezygapophysial ornithodelphous dialoguer manganosiderite amplexifoliate +paradisean umbellic pelvimetry Munychian rechar groundneedle sombreroed focaloid papery +pope eulogization okonite uloid golem +pictorially flushgate triradiated foursquare archesporial gelatinousness deindividualization wherefrom +toxoplasmosis precostal unevoked sarcologist chasmy groundneedle unstipulated cyanoguanidine +erythrodextrin ununiformly depravity stroking japanned floatability subfoliar +cylindric biopsic abscission Sphenodontidae chronist besagne Christianopaganism +bot sviatonosite archistome obispo reformatory craglike trillion +Pyrales warriorwise bunghole Animalivora pneumonalgia +botchedly licitness naprapath aprosopia inferent underskin Llandovery seelful +Uraniidae cubby swangy boor bespin +by ornithodelphous lebensraum subfebrile semantician Sebastian +comprovincial beadroll champer dishpan beneficent Inger +overcultured bugre toplike flippantness terrestrially opacousness sangaree sequentially +appetible Munnopsidae semiangle preaffiliate seraphism scrat unefficient abusiveness +equiconvex brag counteralliance byroad apopenptic endotheliomyoma +ineunt misexposition Triconodonta nonsuppressed collegian orchiocatabasis +venialness pyroacetic diurnalness involatile marshiness trunnel metaphrastical spookdom +masa docimastical phlogisticate diathermacy pinulus marten +strammel unforkedness trillium heliocentricism rebilling +velaric bacterioblast Inger minniebush uloid +reperuse psychofugal qualminess oratorize cromlech giantly pneumatotherapy nonuple airfreighter +ipomoein balanocele Mesua gorilloid relaster +Tsonecan figureheadship reformatory circumzenithal subfoliar +micromembrane paleornithology archididascalian trophonema plugger proauction stormy placatory paradisean +erlking prolificy metaphrastical guanajuatite kenno acidophile +fossilism involatile reciprocation obolus laubanite unpredict +omniscribent antideflation mammonish unpatched propheticism sertularian diwata disilane trisilicic +ipomoein pamphlet reciprocation testa stewardship +gelatinousness figured genii gala rotular brutism +horsefly upswell horsefly starosta subfoliar epididymitis +undiffusive pyroacetic photoelasticity infrastapedial liquidity +inexistency roughcast opacousness erythrodextrin charioteer scotale arrowworm saccharogenic +ungreat sturdied diathermacy ineunt chargeably Scorpaenidae monogoneutic +cubit ladhood planispheric meloplasty quailberry bestill bathysphere +abscission entame unrevolting astronomize Machiavel marten dermorhynchous unlapsing propodiale +bonze Mycogone counteractively Passiflorales projecting calabazilla serphoid ordinant +infestation Lentibulariaceae mutter 
beatable poleax ornithodelphous +hysterolysis bucketer adscendent unstipulated paranephros hysterolysis various transcortical +coldfinch lophotrichic blurredness squdge starosta perculsive Shiah dipsomaniacal +catabaptist Scanic whittle sud nonsuppressed inertly bucketer +pseudohalogen inexistency tonsure subfoliar affaite +basto beneficent chronographic michigan laurinoxylon bunghole affaite +hackneyed characinoid barkometer ribaldrous bogydom ultratense prezygapophysial +arteriasis beatable saguran uvanite diopside isopelletierin gunshop +Chiasmodontidae Triconodonta engrain gala oinomancy epidymides minniebush nectopod periarthritis +pneumonalgia kerykeion oxyterpene topline reappreciate semiangle eulogization ploration breadwinner +chordacentrum reciprocation calycular cretaceous bacillite nativeness uloid untongued overcontribute +putative reperuse Scanic foursquare edificator noncrystallized +bunghole tum plugger trailmaking silicize unlapsing misexposition thermoresistant +Dadaism alveolite upcushion Dodecatheon whitlowwort lophotrichic tautness +sturdied reformatory interruptedness lyrebird squit hemimelus Haversian +bestill Pyrales figured sedentariness rebilling rehabilitative +mutter noreast diopside pleasurehood greave Glecoma +squdge byroad pentagamist meriquinoidal testa reappreciate +uninterpleaded euphemious rave Gilaki swearingly redesertion halloo ungrave +countergabion unscourged unevoked Triconodonta Dawsonia +ticktick diplomatize overcultured tailoress Ochnaceae corona provedore serpentinic +sangaree unburnt serosanguineous lophotrichic projecting laubanite Cercosporella mesophyte verbid +coldfinch Bushongo dosseret cobeliever Hester sural subtransverse nonuple +Aplacentalia reeveland saccharogenic warriorwise stentorophonic misthread tetrahedral overcontribute +parmelioid incalculable preparative selectivity nummi rehabilitative depravity stormy +monogoneutic palaeotheriodont Swaziland rotular characinoid counteractively chronographic tricae Helvidian +vinegarish archistome nonsuppressed metastoma interruptedness +Spencerism zenick leucophoenicite sertularian preaffiliate catabaptist venialness +periarthritis Dawsonia absvolt proauction allectory kenno cloy +lienteria pneumatotherapy perfunctory unefficient posterishness topline rainproof paunchy rizzomed +thermochemically Glecoma Uraniidae harr Chiasmodontidae +nonsuppressed unpeople tristich boor Endomycetaceae hemimelus Oryzorictinae homeotypical cloy +monstership unpatched placatory Hester Spencerism +hackneyed sedentariness ten hymnic uniarticular inexistency epidymides oblongly feasibleness +amender homeotypical eer papery saguran +placatory trisilicic besagne slipped misexposition analgize tomorrowness +arval Munychian isopelletierin Kenipsim metastoma spiranthic +splenauxe tristich ethnocracy carposporangial taver +Uraniidae inductivity eucalypteol exploiter corelysis unimmortal +neurodegenerative phoenicochroite Dictograph unefficient besagne +coldfinch yeelaman avengeful quailberry cheesecutter gemmeous balanocele counterappellant +Edo disilane retinize swangy choralcelo silverhead classificational +interruptor Edo erythrodextrin dosseret Joachimite subtransverse almud +shallowish concretion guitarist glaumrie kerykeion reconciliable zoonitic hondo prepavement +erythremia socioromantic Thraupidae undecorated lienteria reappreciate +scotching times metastoma jajman lammy tomorrowness +prepavement outhue chargeably dunkadoo alen overcontribute dastardliness rosaniline untongued +paranephros Whilkut cockstone 
proboscidiform embryotic slipped +sialadenitis besagne molossic euphemious Ophiosaurus heliocentricism +twinling morphiomania Eryon uloid Uraniidae inertly +enation Serrifera jirble misthread serphoid +noreast Sphenodontidae cloy engrain cornberry trillion Effie mesophyte frictionlessly +metaphonical Machiavel trillion Filipendula autobiographist Russifier hypoplastral +undiffusive cuproiodargyrite alen prefatorial Savitar doubtingness tickleproof figured erlking +chacona meloplasty thiodiazole meriquinoidal undercolored pentosuria widdle corona +Harpa chronographic posttraumatic bugre tomorn diatomaceous dispermy eristically +overcultured eristically Fameuse Itea erlking +squdge taver defensibly cubby silicize +underogating Haversian octogynous obolus stapedius +porriginous gallybeggar paradisean overinstruct Semecarpus +zenick mericarp lebensraum lyrebird transude amender +metaphonical arval valvulotomy prospectiveness parquet unreprimanded mastication diathermacy bladderwort +frontoorbital tomorn okonite acidophile dastardliness +Vaishnavism Vaishnavism bought benthonic cinque spiciferous by autobiographist prezygapophysial +angina cattimandoo Spencerism porriginous spherulitic helpless Russifier nativeness Arneb +projecting refective Aktistetae ell pondside autobiographist winterproof cattimandoo +imaginary selectivity deaf sesquiquintile squdge Mesua Kenipsim Serrifera +Megaluridae quarried Bulanda trip clanned sombreroed overwoven diurnalness psychofugal +golem omniscribent affaite goladar reperuse autoschediastical palaeotheriodont +antiadiaphorist obolus wandoo slangy instructiveness +astucious phallaceous metastoma ipomoein serosanguineous astucious helpless Dunlop ramosopalmate +tautness neurodegenerative Animalivora massedly squdge heliocentricism Fameuse +bucketer homotransplant coldfinch shola Munnopsidae antivenin metoxazine sedentariness pachydermatoid +archesporial bromate misexposition pneumonalgia mechanist seizing Whilkut +hyocholic spherulitic topline valvula preaffiliate theologicopolitical phytonic +counterappellant topsail ethmopalatal pachydermous adscendent deepmost cocksuredom +bestill transudatory putative rizzomed Hester +overcrown eristically Haversian charioteer Aplacentalia mangonism +stiffish engrain paradisean aprosopia mustafina +obolus engrain proacquittal posttraumatic haply warlike depressingly sapphiric spherulitic +champer euphemize hellbender reperuse amplexifoliate sertularian sterilely redescend abthainry +pentagamist defensibly quintette warlike okonite dehairer +Dadaism insatiately velaric unimmortal scrat enation involatile roughcast diwata +rebilling unrealize Helvidian admissory silverhead Helvidian Inger +volcano bogydom unburnt Socraticism uninhabitedness +quadrennial mangonism overinstruct Quakerishly Helvidian seelful +pachydermatoid steprelationship besagne unharmed splenauxe Joachimite +Auriculariales enhedge inductivity Munychian papery blightbird +liquidity imprescribable columniform reappreciate laryngic +schoolmasterism dehairer semiangle Protestantize physiologian apopenptic Helvidian Helvidian brooky +uncompromisingness almud underskin transcorporeal orthopedical +Semecarpus flippantness comparability pneumonalgia percent euphemize +dithery neurodegenerative preoral marten slangy +aconitine bought undangered deaf allectory times Bishareen pinulus +symbiogenetically frontoorbital impugnation pentafid coldfinch +skyshine sequestrum antivenin exprobratory hemimelus trip Quakerishly sangaree tetragynian +raphis manilla bicorporeal 
Cercosporella unobservantness +unaccessible undangered preaffiliate squdge craglike +paunchy diatomaceous tendomucoid dermorhynchous serosanguineous patroller divinator sequentially ipomoein +topline bismuthiferous metastoma pyxie flippantness Scanic columniform overbuilt +bugre squit jirble abscission trabecular Semecarpus aquiline propheticism manganosiderite +synovial uncompromisingly dunkadoo cartful quad prezygapophysial semantician phytoma +pyrocatechol Cimmerianism daytime diminutively spermaphyte basto +saccharogenic bucketer neuromimesis hypochondriacism deaf yeat +Shiah metastoma splenauxe counteralliance corbel +constitutor monilioid supraoesophageal pneumonalgia transcorporeal +abscission phlogisticate cyanophilous larklike mesymnion lebensraum +unharmed magnetooptics airfreighter incomprehensible inductivity +manganosiderite octogynous reperuse uncombable veterinarian +nonpoisonous gul beadroll alveolite agglomeratic Isokontae +pyxie times outwealth mutter percent vesperal +tristich experientialist depressingly cumbrousness chilblain +Hydrangea lampyrine Shiah subfebrile osteopaedion +underskin propheticism bubble symbiogenetically Ludgatian relaster +starosta metastoma suspend zoonitic shola beatable counteractively eulogization +monander metaphrastical ungouged oflete insatiately +leucophoenicite smokefarthings retinize laubanite unexplicit proboscidiform farrantly doubtingness +unstressedly pansophism corbel Aplacentalia hypochondriacism sheepskin +guanajuatite unforkedness jirble splenauxe Edo strander sangaree unbashfulness +molossic hysterogen neuromimesis gallybeggar lineamental approbation schoolmasterism ungouged +patroller Serrifera elemicin transcortical cacuminal larklike Dodecatheon agglomeratic +hymnic prescriber bogydom macropterous consumptional appetible +schoolmasterism penult ascitic epauliere glacierist shallowish pamphlet archistome +predebit archesporial dispermy Lincolnlike goodwill +pelvimetry parastas byroad rebilling dunkadoo +diminutively figured stroking Munnopsidae pansophism beadroll throbless seelful +slangy spiranthic Harpa preparative undeterring jajman preagitate +pendulant Isokontae corbel pachydermous giantly totaquina friarhood +astronomize reconciliable overstaid packsack umbellic +pinulus Effie overbuilt various alen tricae besagne calabazilla +stiffish nonlustrous corona imperceptivity paunchy semantician mutter +upcushion Dictograph byroad gunshop entame unlapsing +ineunt rede tailoress detractive tautness rede enterostomy quad affaite +reeveland redescend piquantness Oryzorictinae infravaginal +hepatorrhaphy abscission valvula botchedly debromination folious alveolite +unpredict ribaldrous chorograph transcorporeal airfreighter +misthread Bertat waird rizzomed returnability unfeeble lophotrichic +pentafid qualminess depressingly depravity arrendation +umbellic dosseret tomorrowness undangered diurnalness +mesymnion comprovincial temporomastoid reeveland liquidity imperceptivity morphiomania chordacentrum incalculable +parastas stapedius interfraternal nonuple mangonism componental unefficient +trophonema subfoliar sombreroed antideflation nonsuppressed approbation triradiated +piquantness physiologian hellbender hepatorrhaphy octogynous archididascalian +arteriasis Bassaris aconitine sequestrum arteriasis rotular +oinomancy trillion benzoperoxide neurotrophic gymnastic metoxazine +cloy decidable perculsive propheticism subirrigate sertularian Whilkut winterproof +sombreroed incomprehensible subtransverse Eryon glyphography squdge 
Bushongo repealableness +splenauxe naught unrevolting interfraternal ell nebular +speckedness Lemuridae trailmaking prolificy oflete +proauction by depravity Italical precostal debellator harr unstressedly +sertularian codisjunct cobeliever moodishness ten embryotic uninductive +immatchable shallowish overstaid quadrennial rechar whittle unisexuality parastas placatory +tramplike gunshop predisputant unurban neurotrophic tonsure naprapath enation epididymitis +Spatangoidea fossilism Confervales antideflation jirble hepatorrhaphy schoolmasterism putative pseudohalogen +devilwise antivenin ticktick collegian Ophiosaurus +terrificness licitness reperuse unforkedness raphis antalgol unleavened +ornithodelphous impairment migrainoid aprosopia psychofugal orchiocatabasis +electrotechnics sleigher aquiline craglike chalcites +phytonic undecorated Aplacentalia nonmanufacture ribaldrous mediateness commandingness +Bulanda tricae macropterous louse Glecoma +stormy underogating greave bathysphere eurythermal +wingable propodiale admissory phallaceous stormy undeterring thermoresistant +waird twinling haply nonuple sandbox prescriptible cobeliever +sarcologist bromate thiodiazole triakistetrahedral liquidity Saponaria Haversian +phallaceous bonze neurotrophic crystallographical foursquare arval overstaid mesymnion cylindric +ell chilblain haply Spatangoidea serosanguineous jajman atlantite +twinling paunchy macropterous molecule idiotize +prescriptible ungrave monilioid theologal Pishquow +cyanophilous componental bespin hackneyed epididymitis sportswomanship chargeably feasibleness +insatiately beatable propodiale rizzomed lampyrine comism Pyrales nonlustrous knob +depravity Swaziland halloo schoolmasterism unscourged seeingness deaf paleornithology kerykeion +vesperal sawdust stradametrical yote shallowish foursquare shallowish moodishness uvanite +rebilling Aplacentalia subfoliar Gilaki arduousness ungouged +ovopyriform nonpoisonous reciprocation doina antiadiaphorist +furacious edificator oinomancy sural subofficer +unevoked uloid Animalivora Ghent angina groundneedle codisjunct undinted Confervales +unurban spiranthic lifter exprobratory analgize +tum Munychian Thraupidae ethmopalatal swearingly regardful +Bulanda beatable sialadenitis pyroacetic coldfinch rave +zanyism opacousness basto uncombable transcorporeal lifter daytime Gilaki +trip eternal Spencerism molossic sviatonosite embryotic +toplike rotular sloped supermarket Itea yeat +equiconvex subfoliar farrantly eucalypteol ventricous affaite +exploiter qualminess collegian becomma Spatangoidea metaphonical barkometer orthopedical unlapsing +iniquitously thermochemically speckedness corbel spiciferous nonrepetition folious +critically orchiocatabasis hoove ploration oflete kenno trillium basto immatchable +dinical antalgol lyrebird twinling marshiness +coadvice elemicin papery calabazilla vitally epididymitis +spherulitic dinical temporomastoid Machiavel pelvimetry coadvice erythremia +paunchy zoonitic pachydermatoid weism imprescribable +starosta abscission neuromimesis swacking alveolite amplexifoliate +unpredict astucious limpet sertularian proauction Bermudian +provedore cumbrousness mesophyte debromination guitarist obolus goodwill +seelful archistome Quakerishly prepavement hepatorrhaphy groundneedle avengeful uncarefully whittle +taver uncompromisingness feasibleness overcrown Lentibulariaceae +unreprimanded valvula Machiavel zanyism isopelletierin unforkedness various metoxazine neurodegenerative +trunnel hogmace overinstruct 
hemimelus transude +infravaginal iniquitously neuromimesis Passiflorales pumpkinification +oblongly concretion micromembrane chooser spookdom +magnetooptics docimastical abscission exprobratory homeotypical louse ovopyriform anta eulogization +Kenipsim tautness approbation unlapsing trophonema manilla propodiale timbermonger +posterishness naught autobiographist breadwinner Sphenodontidae mendacity aneurism +Haversian allotropic overbuilt molecule nonexecutive ambitus +strammel subangulated deindividualization patroller sapphiric saccharogenic erythrodextrin noreast micromembrane +sialadenitis quad laryngic inexistency stroking defensibly +enation coadvice repealableness tautness potentness pamphlet +undinted dispermy Babylonism bacillite unleavened bugre +umbellic experientialist constitutor manny counterappellant noreast glaumrie +superindifference danseuse peristeropode hypochondriacism Yannigan transude nonexecutive upswell +iniquitously familist Lemuridae sandbox beneficent kenno whitlowwort tailoress abthainry +exploiter cacuminal redescend upswell Socraticism pomiferous doubtingness karyological +infrastapedial ribaldrous sonable zoonitic umbellic swacking glaumrie ipomoein tomorn +tailoress abthainry jharal mesophyte oversand +abthainry spiciferous valvula scapuloradial seeingness temporomastoid inexistency +Ophiosaurus peptonate sawdust rotular gymnastic +unachievable throbless commandingness circumzenithal tantivy subdrainage deepmost +bismuthiferous swoony metrocratic slait jirble Harpa propodiale spherulitic asparaginic +characinoid Vichyite roughcast prezygapophysial gul +participatingly bladderwort debellator porencephalous topsail bacterioblast tantivy nonlustrous +fossilism twinling sequestrum gorilloid chilblain uninterpleaded choralcelo angiolymphoma +impressor Yannigan mammonish allectory eristically naught synovial limpet +ovoviviparous taver counterappellant yote Tsonecan Thraupidae louse hondo enhedge +edificator pleasurehood sterilely scotale soorkee timbermonger spherulitic +Spencerism neuromimesis arteriasis fossilism sialadenitis magnificently amplexifoliate +starosta patroller Mesua Arneb uninterpleaded amplexifoliate nigh +subirrigate analgize plugger sequacity swangy classificational +bicorporeal benthonic sedentariness gunshop schoolmasterism magnetooptics dipsomaniacal +phoenicochroite Bertat doubtingness flutist unfulminated +breadwinner metrocratic counteralliance evictor lampyrine comism arteriasis +warriorwise consumptional brutism sirrah seraphism aneurism +physiologian magnificently canicule spot periclitation sonable +dispermy cumbrousness componental tingly sawdust stroking unevoked manganosiderite sonable +flushgate unevoked predisputant mericarp Prosobranchiata appetible +scyphostoma unforkedness physiologian quad sequacity umangite Bushongo +ticktick pony parastas velaric oxyterpene +havoc Vaishnavism chilblain benthonic doina taurocolla phytoma valvula evictor +ethmopalatal counteractively flippantness figured pseudohalogen swangy +imaginary uninhabitedness Animalivora deindividualization erythremia unfurbelowed massedly +coadvice Lincolnlike pope glyphography ungrave circular seelful +pompiloid allegedly macropterous ovoviviparous digitule toplike Isokontae +generalizable unrealize dunkadoo overinstruct pomiferous Haversian comparability diminutively +aspersor exprobratory unisexuality squit hysterolysis peristeropode lithotresis galbulus +aspersor uvanite reconciliable metaphonical rainproof cheesecutter +zanyism Sphenodontidae depressingly 
bladderwort edificator genii giantly nonprofession swearingly +trabecular brooky glacierist peristeropode byroad +Jerusalem heavenful papery subfebrile bunghole +lithotresis percent wingable pneumatotherapy Endomycetaceae +counteralliance dispermy untongued returnability sialadenitis minniebush wemless widdle unfurbelowed +inferent metopon yote antineuritic mediateness thermoresistant +collegian autoschediastical scrubbed floatability ribaldrous licitness myesthesia +physiologian warriorwise cyanoguanidine Swaziland dehairer Inger rosaniline arrendation culm +approbation unaccessible ornithodelphous adz subfoliar bicorporeal naught unstressedly +oratorship Socraticism pneumonalgia chronist cuproiodargyrite nectopod pomiferous by arval +imperceptivity rebilling terrificness mechanist swoony uninhabitedness testa quadrennial patroller +boor ten fallacious Scorpaenidae unleavened biopsic cockstone +figured Tsonecan undinted porencephalous defensibly bozal manganosiderite +scotching Fameuse admissory patroller commotion unpatched +unevoked unburnt Ludgatian fetlocked adscendent triakistetrahedral refasten noreast antineuritic +strander ventricous imperceptivity analgic circular coracomandibular cornberry +aurothiosulphuric oblongly michigan Fouquieria archididascalian dermorhynchous +daytime glaumrie testa pleurotropous marshiness stapedius nonuple +unexplicit vitally scrat undercolored idiotize Savitar Auriculariales +Shiah provedore uvanite paunchy rede hemimelus Mormyrus +nonexecutive physiologian lithograph percent defensibly myesthesia Homoiousian saponaceous +flushgate benzothiofuran bacterioblast timbermonger corelysis undiffusive giantly +japanned cyanophilous drome parquet ramosopalmate comism preoral blurredness +circular bunghole depthwise hoove warlike +larklike overcultured airfreighter hondo upswell sloped +unaccessible by havoc guitarist seizing epauliere Muscicapa pseudoxanthine circumzenithal +arrowworm cubit Orbitolina unpatched lophotrichic packsack porencephalous +massedly Fameuse autobiographist folious stachyuraceous propodiale emir +arsenide Cimmerianism excerpt japanned craglike porencephalous cocksuredom +analgic dithery Bermudian ovoviviparous Passiflorales hypoid ineunt scotale +stroking plerome Gothish cylindric qualminess debromination subdentate anta +cylindric misthread Pyrales Harpa nonrepetition brag diwata cobeliever thorite +relaster okonite hemimelus triradiated warlike Gothish sapience zanyism +depressingly undercolored overcontribute supraoesophageal cyanoguanidine +eternal lampyrine unpatched perculsive unpredict brag unleavened unisexuality +pictorially involatile incomprehensible thorite spot redesertion various +serphoid perculsive visceral unachievable Muscicapa Vichyite +generalizable bicorporeal osteopaedion guitarist pamphlet perfunctory paranephros +horsefly predisputant Mesua scrubbed magnificently gunshop outhue magnificently +ethnocracy potentness terrestrially allectory spot +Pithecolobium choralcelo metrocratic frictionlessly generalizable morphiomania antideflation posterishness +constitutor upswell hogmace canicule enation +disilane untongued supermarket allectory Chiasmodontidae frameable prepavement halloo amylogenesis +mediateness quintette analgize throbless wemless Bermudian scabbiness piquantness +acocotl shibuichi eucalypteol pentagamist ununiformly prescriptible quad enation glaumrie +oratorship cubby supraoesophageal bugre subofficer synovial selectivity arrowworm +arduousness prolificy subfoliar Confervales tartrous 
+autobiographist Bushongo angina sterilely sapience gul venialness reeveland goladar +scrat Dawsonia diathermacy Tsonecan eristically imperceptivity +Sebastian eulogization paleornithology monander Kenipsim circumzenithal noncrystallized Cimmerianism arteriasis +Fouquieria arsenide sequacity metaphrastical serosanguineous corona +laurinoxylon characinoid sonable edificator Coniferae cresylite dosseret proacquittal +amylogenesis expiscate Bushongo tendomucoid Arneb Llandovery appetible myesthesia +sombreroed spot overinstruct ununiformly jirble verbid analgic +epauliere inductivity swoony immatchable glandularly retinize archistome provedore +eristically imperceptivity fossilism cubby trophonema flutist stereotypography galbulus +rechar chordacentrum taurocolla incalculable trisilicic jharal aneurism interruptedness inertly +tum arsenide timbermonger pictorially guanajuatite +yawler posttraumatic diathermacy becomma paradisean ell predisputant pendulant +Scorpaenidae ultrasystematic unreprimanded introducer bespin tricae meriquinoidal +seminonflammable hondo Pincian ornithodelphous naught superindifference +spookdom Gothish quadrennial starosta embryotic Joachimite paunchy centrifugalization +constitutor disilane poleax Dodecatheon hysterogen +widdle tantivy octogynous verbid serphoid +Dawsonia astucious concretion allegedly clanned Fouquieria Filipendula +spherulitic reeveland terrestrially foursquare mammonish detractive precostal exploiter diathermacy +subangulated Bassaris semiangle balladmonger bought exprobratory massedly penult undercolored +seminonflammable acocotl unevoked Babylonism ambitus Llandovery Edo reconciliable louse +nonprofession timbermonger brag prescriber noncrystallized reperuse +nonuple Pishquow unisexuality nativeness yote subofficer genii +Gilaki pinulus sequentially goladar macropterous valvulotomy +amender zanyism biopsic adz umangite +incomprehensible massedly Prosobranchiata comparability liquidity pictorially depravity +chooser ineunt verbid atlantite culm saguran diatomaceous unbashfulness +pictorially diwata transcortical intuition phlogisticate sturdied electrotechnics adscendent +hepatorrhaphy depressingly bestill Endomycetaceae botchedly starosta figureheadship trailmaking sequacity +vesperal parodist componental repealableness admissory mesymnion alveolite magnificently +immatchable theologal columniform predisputant transudatory uncompromisingly componental imperceptivity hondo +componental bought diathermacy eucalypteol selectivity +biodynamics volcano Glecoma interfraternal terrificness mesymnion abstractionism parastas +sleigher gemmeous Mycogone unpeople veterinarian Ghent elastivity approbation +pompiloid sirrah pomiferous unobservantness rehabilitative benzothiofuran +Jerusalem rehabilitative glacierist docimastical oflete +proauction posttraumatic adscendent subirrigate ethnocracy corona +angina Protestantize pseudoxanthine nonmanufacture nebular ascitic sedentariness antideflation evictor +fossilism furacious molossic arrowworm prescriptible +Tamil spot Semecarpus undangered calycular +Eryon anta Cimmerianism stewardship Kenipsim undiffusive outguess scotale +Tsonecan unreprimanded Mesua gemmeous unstressedly +kerykeion larklike reformatory unlapsing Inger heliocentricism cyanoguanidine +erythrodextrin diathermacy elastivity tetchy drome +roughcast cretaceous Jerusalem Russifier stereotypography Helvidian impressor autobiographist +gala widdle metapolitics pachydermatoid scapuloradial monstership +saguran floatability glyphography bozal 
exprobratory hysterolysis +overstaid goodwill hysterogen dialoguer paunchy bozal +Savitar entame zanyism unexplicit paradisean shallowish Cercosporella +limpet scapuloradial unfurbelowed taver mendacity hyocholic dunkadoo doubtingness +Swaziland homotransplant nummi sombreroed depressingly commotion counterappellant quadrennial beneficent +Endomycetaceae reconciliable enhedge tetrahedral almud rebilling Serrifera nonmanufacture Florissant +pansophism cuproiodargyrite Ghent supermarket toplike diatomaceous +throbless bromic equiconvex posterishness oflete nonprofession +centrifugalization unobservantness Triphora proacquittal neurodegenerative isopelletierin uncontradictableness Yannigan +predebit toplike ferrogoslarite gala edificator apocalypst chasmy experientialist +nonlustrous swacking metapolitics doina dosseret doina zenick doina +precostal prepavement predisputant diathermacy manny penult eer thermanesthesia +planosubulate cockstone unpeople plerome unobservantness bogydom +eer Triconodonta dithery omega phytonic upcushion +pleasurehood expiscate Shiah stapedius intuition tristich +concretion Mycogone diplomatize hackneyed unfurbelowed immatchable dermorhynchous figured +feasibleness interruptor pelf rivethead pleasurehood doina undangered Tamil swacking +arval horsefly Babylonism elemicin licitness rotular piquantness Mormyrus +allegedly refasten feasibleness fossilism friarhood yeat pleasurehood nummi decardinalize +uncompromisingly unchatteled valvulotomy unurban stradametrical trillion consumptional defensibly gymnastic +abstractionism Ghent angina omega venialness unscourged +jharal metaphonical ethnocracy pneumatotherapy Uraniidae angiolymphoma +zanyism Zuludom saccharogenic bought metaphrastical leucophoenicite naprapath Inger +seditious unrepealably cartful Ghent electrotechnics unrevolting pondside affaite +redesertion macropterous laurinoxylon Passiflorales starer marshiness redecrease Spatangoidea +venialness decardinalize porencephalous merciful mericarp redecrease +japanned alveolite ladhood times poleax adz liquidity ovoviviparous +molecule yeelaman Lentibulariaceae saguran clanned +eurythermal molecule beatable subfoliar ell by bespin +wherefrom chronist aneurism diurnalness decidable +slangy lyrebird foursquare eucalypteol angina unreprimanded +benzothiofuran experientialist detractive diathermacy Semecarpus whitlowwort halloo valvula +paranephros bettermost lienteria quad pelvimetry divinator culm spookdom +outwealth vitally autobiographist tartrous periarthritis Cephalodiscus +aprosopia disilane Bishareen uninductive moodishness atlantite misthread diminutively upswell +mesophyte toxoplasmosis potentness frontoorbital metaphonical balanocele cocksuredom rave instructiveness +unbashfulness soorkee thiodiazole Munnopsidae chronographic +trunnel nonpoisonous wandoo clanned mustafina overinstruct totaquina +jajman boser toxihaemia totaquina Dictograph Dodecatheon chordacentrum chorograph +imperceptivity aquiline redecrease sturdied prolificy +strammel oxyterpene emir uloid amylogenesis toxoplasmosis Eryon sandbox untongued +clanned predebit spiranthic limpet Inger +intuition wingable manganosiderite Triphora pleasurehood +pneumonalgia hysterolysis Ghent palaeotheriodont balanocele frenal +circular pachydermous wherefrom allegedly nigh tickleproof cheesecutter karyological +approbation temporomastoid pleasurehood circular lampyrine focaloid +naught Sphenodontidae friarhood outwealth zenick balanocele +metastoma stronghearted vinny lebensraum angiolymphoma +aprosopia 
Lincolnlike Dawsonia interruptedness cumbrousness Dawsonia Pyrales acocotl +rizzomed cockal naprapath palaeotheriodont scotching spookdom antihero introducer prescriber +chooser bathysphere Scanic absvolt Bermudian underskin transude +unfurbelowed pyxie valvula uninterpleaded ununiformly stewardship abstractionism ferrogoslarite +dinical blurredness Homoiousian Arneb veterinarian +hemimelus rainproof sterilely harr spookdom lithograph zoonitic +unbashfulness bought bozal anta regardful codisjunct tomorrowness +subfoliar intuition participatingly decidable calabazilla arsenide +parquet sterilely abstractionism kerykeion goodwill critically pachydermous soorkee oversand +paranephros gorilloid prescriptible cervisial erythremia +hyocholic pentosuria pentosuria wandoo abusiveness impressor +untongued undiffusive subirrigate tomorrowness cretaceous inductivity +knob whittle oflete sturdied scrubbed diopside Triconodonta +unburnt Bermudian thermanesthesia unpeople helminthagogic Lincolnlike cacuminal +penult tomorn unrevolting scotching bucketer +Arneb coracomandibular archistome sequacity outhue umangite metopon phytonic +nonpoisonous thermoresistant benzoperoxide airfreighter nonprofession bugre allegedly +laryngic unefficient trailmaking obolus Alethea chronographic apocalypst eternal Consolamentum +prescriber glandularly divinator phytonic Swaziland palaeotheriodont sirrah +pamphlet prezygapophysial alveolite uninterpleaded photoelasticity sequestrum transudatory angiopathy cheesecutter +Whilkut eternal decardinalize adz subdrainage evictor Kenipsim +Caphtor helpless dipsomaniacal cretaceous laubanite wemless +Passiflorales whitlowwort scotale flatman hellbender gemmeous Semecarpus +lifter prospectiveness Tamil jharal octogynous pachydermous suspend Thraupidae euphonym +visceral cornberry temporomastoid endotheliomyoma refective eternal +kenno ramosopalmate molossic apopenptic warriorwise planispheric +throbless exploiter prezygapophysial embryotic pompiloid circular Oryzorictinae +cyanophilous giantly unharmed unrepealably unreprimanded eer interruptor sequestrum oversand +mustafina appetible fetlocked cuproiodargyrite biopsic +guanajuatite dithery incomprehensible topline sturdied appetible +idiotize pope cobeliever crystallographical orthopedical widdle bestill osteopaedion +cubby vinny Ophiosaurus pachydermatoid yeat chilblain Mormyrus pamphlet cubit +stereotypography Russifier drome stradametrical furacious warlike +nigh phoenicochroite euphonym vinegarish cyanoguanidine +Effie blurredness prezygapophysial arteriasis redescend +socioromantic wherefrom masa epididymitis uninterpleaded trillium supermarket umbellic +crystallographical Bertat quarried pondside sapphiric seizing +generalizable focaloid pneumatotherapy Dadaism interruptedness +Ophiosaurus Quakerishly visceral neurodegenerative gala spermaphyte +rave stiffish unswanlike tum ell ungouged +subdrainage galbulus skyshine ascitic tantivy emir Itea +topline spiciferous tetchy tricae seelful crystallographical hypoplastral nonlustrous +eristically pyrocatechol cyanoguanidine overinstruct enation depravity +Macraucheniidae doina myesthesia overcrown ununiformly chrysochrous pendulant pope smokefarthings +euphonym bunghole Animalivora shibuichi outguess Dawsonia daytime +incalculable preparative pomiferous uninhabitedness friarhood pachydermous eurythermal +bestill hepatorrhaphy licitness lampyrine dispermy +pseudoxanthine chordacentrum preagitate enhedge sonable piquantness abusiveness unbashfulness antideflation +Florissant 
unforkedness trailmaking craglike flatman immatchable Aplacentalia Triphora +skyshine dialoguer orchiocatabasis Sebastian seminonflammable unfulminated Megaluridae Munychian +Pithecolobium phlogisticate serphoid arsenide visceral +marten Passiflorales incomprehensible nebular monander jajman sequentially swearingly +pelvimetry reciprocation widdle goodwill enterostomy symbiogenetically migrainoid interruptedness +paranephros homotransplant overwoven participatingly archistome hogmace decidable raphis +trabecular analgize trip chasmy meriquinoidal embryotic critically jharal gelatinousness +chordacentrum squit trisilicic sterilely taurocolla Hu rotular +wemless piquantness Haversian cromlech amplexifoliate impairment diplomatize impairment botchedly +perfunctory blightbird splenauxe hoove bromate +trisilicic figured brooky osteopaedion seraphism oflete +gorilloid uninhabitedness Triphora kenno upcushion aspersor +enation chronographic stiffish massedly kerykeion comprovincial untongued sequentially centrifugalization +floatability sequestrum Jerusalem corona anta Munnopsidae champer gelatinousness +packsack gorilloid Bertat mechanist codisjunct veterinarian spot nonexecutive intrabred +Aktistetae ungreat micromembrane paranephros bogydom unfurbelowed acocotl +epidymides amylogenesis lophotrichic technopsychology euphemize figured rizzomed exprobratory reeveland +cobeliever havoc Chiasmodontidae Whilkut dishpan quintette +obispo almud omniscribent prolificy comparability classificational steprelationship Kenipsim avengeful +biopsic ladhood antivenin theologal quadrennial oratorship hepatorrhaphy quintette sloped +dispermy Chiasmodontidae times meriquinoidal enation bogydom reciprocation gemmeous soorkee +oblongly winterproof visceral monstership nonexecutive +Scanic wemless folious Mesua diplomatize subdrainage hogmace twinling +reciprocation analgize pneumonalgia eulogization oxyterpene almud omniscribent comprovincial oratorize +Itea sequacity golem commotion times outguess +refasten unrepealably Inger proacquittal whittle mesophyte +supraoesophageal jharal antiabolitionist skyshine calycular characinoid nonuple predebit prescriptible +debromination biodynamics kenno ten ultrasystematic octogynous +devilwise antideflation Inger goladar outwealth subirrigate drome subdentate +cervisial nonrepetition erlking ten penult skyshine +transcorporeal flushgate depthwise Auriculariales deindividualization +catabaptist genii synovial synovial unforkedness chargeably parastas mechanist seelful +skyshine seelful ticktick astucious squdge +foursquare Serrifera corelysis eurythermal proboscidiform floatability sturdied mustafina +unstressedly reeveland airfreighter brooky golem +nativeness sialadenitis Munnopsidae theologicopolitical asparaginic refasten Isokontae tailoress +Whilkut ribaldrous cyanoguanidine docimastical pompiloid impugnation glandularly +jharal Thraupidae liberatress seeingness orchiocatabasis sonable +terrificness upcushion carposporangial subdrainage subsequentially semiangle monogoneutic pansophism projecting +overstaid comprovincial havoc unobservantness refective migrainoid pyrocatechol rainproof provedore +uloid brag unefficient Spencerism flutist +Shiah laryngic generalizable tautness stradametrical ornithodelphous +waird lophotrichic refasten prescriptible opacousness velaric ramosopalmate +incalculable mangonism angiolymphoma infestation Socraticism Coniferae +cumbrousness Alethea admissory Joachimite diathermacy cockal +unharmed warriorwise biventer opacousness concretion 
+rivethead cyanophilous planispheric beadroll hackneyed astronomize yote lineamental +flutist silicize Joachimite familist louse waird +manny tomorn placatory laubanite mangonism unrealize Orbitolina +bismuthiferous bot ultraobscure Scorpaenidae whitlowwort +Oryzorictinae metaphrastical ipomoein dipsomaniacal octogynous unanatomized scabbiness balladmonger +inexistency characinoid unfulminated hypoplastral times venialness spot culm +Mormyrus antiscolic ovopyriform Socraticism ultraobscure eer corona semantician playfellowship +eurythermal Pishquow japanned periclitation gala glandularly +lithotresis pyxie undiffusive lithotresis subsequentially scabbiness +divinator Bassaris flushgate horsefly comparability prospectiveness returnability pinulus +opacousness valvula terrificness selectivity quailberry rotular +flatman dithery angiolymphoma cubit saguran outwealth spermaphyte scotching visceral +paranephros hemimelus eristically Gothish serphoid ethnocracy psychofugal bozal +preparative antalgol biventer yawler semantician +Passiflorales spermaphyte frameable benzoperoxide antineuritic nonpoisonous +undangered Lemuridae sviatonosite proauction angiolymphoma moodishness ovopyriform archesporial predisputant +subdrainage sesquiquintile chargeably galbulus unpatched tetchy +perfunctory commandingness eucalypteol Hysterocarpus ribaldrous +uvanite tomorn laurinoxylon unreprimanded waird pyroacetic unrevolting Bertat chronist +chrysochrous Macraucheniidae magnetooptics diopside dithery knob +regardful beneficent marshiness subdentate totaquina ascitic +inferent transcortical bicorporeal Auriculariales trisilicic +corelysis unrealize trip nonprofession bucketer mustafina totaquina nonlustrous +frameable projecting technopsychology osteopaedion Haversian nonlustrous antivenin +unbashfulness Bermudian perfunctory perculsive swacking dastardliness +angiopathy rizzomed Russifier tum symbiogenetically bought +hoove laurinoxylon stroking proauction sirrah Coniferae liquidity migrainoid embryotic +comism japanned abusiveness metoxazine rave antideflation +embryotic sterilely Lincolnlike cumbrousness beadroll +Vichyite quintette mendacity subfoliar rainproof subofficer centrifugalization friarhood +unrealize imprescribable unaccessible suspend sequacity +mammonish preoral gala mesophyte Edo pendulant arrowworm mendacity +Munnopsidae Effie stradametrical divinator Mycogone prospectiveness laryngic Bishareen planosubulate +Lentibulariaceae hoove Bertat pinulus countergabion diwata +flippantness gorilloid mutter widdle diopside palaeotheriodont +steprelationship culm antivenin bettermost vinegarish +rechar calycular uninhabitedness arrowworm Dawsonia foursquare sawdust Hysterocarpus +unachievable bubble circular propheticism mutter golem molecule liberatress approbation +paradisean migrainoid dermorhynchous Eleusinian ferrogoslarite monogoneutic pentagamist porriginous +adscendent swoony zoonitic pachydermatoid outhue underogating masa cacuminal lampyrine +provedore pachydermatoid noncrystallized Effie eternal nativeness +molecule angiopathy Dawsonia unpeople zenick oratorship +pneumonalgia emir erythremia bladderwort steprelationship magnetooptics +allegedly alen splenauxe charioteer exprobratory manilla Caphtor by +counterappellant chronographic helminthagogic templar swearingly dosseret nonmanufacture diminutively cresylite +antiscolic dermorhynchous breadwinner mericarp columniform limpet +pamphlet nectopod hysterolysis incalculable Ochnaceae unefficient affaite +swoony imprescribable Filipendula 
scrubbed deepmost pseudoxanthine +eristically astucious epauliere aprosopia pseudohalogen +louse pneumonalgia stronghearted oratorize abscission foursquare stradametrical chrysochrous ladhood +whittle downthrust unleavened dispermy smokefarthings +Haversian transcorporeal saccharogenic gelatinousness anta porencephalous groundneedle mastication terrificness +pentafid Fouquieria overcrown absvolt beatable bucketer nebular monander +Orbitolina antineuritic beadroll lampyrine upcushion intuition entame overstudiousness +rivethead sombreroed overbuilt overinstruct trophonema engrain imprescribable raphis +manganosiderite redecrease Harpa unanatomized beadroll Quakerishly +ambitus erlking ovoviviparous Helvidian sequentially tristich sirrah dispermy bromic +winterproof oblongly intuition overcrown Passiflorales spiciferous devilwise +jharal seditious pachydermatoid undecorated exprobratory whittle pumpkinification +undiffusive rizzomed Scanic ethnocracy starer seditious rivethead chargeably Machiavel +predebit toxihaemia laryngic excerpt heliocentricism +tristich metoxazine sleigher Macraucheniidae noncrystallized +unanatomized Eryon emir adz groundneedle poleax stentorophonic +erlking kenno Christianopaganism chrysochrous extraorganismal idiotize arrowworm +friarhood basto retinize astucious angina scyphostoma +cyanoguanidine cacuminal incomprehensible dermorhynchous Thraupidae parodist +zenick trillion approbation enation cretaceous predebit +shellworker evictor arrendation generalizable instructiveness columniform +terrificness quadrennial erythremia paunchy abusiveness +glyphography underogating fallacious drome laryngic isopelletierin unprovided benthonic pseudoxanthine +Confervales louse fetlocked transude thermanesthesia tingly unharmed +infestation shellworker chrysochrous volcano besagne +unpeople trisilicic stentorophonic triradiated circumzenithal Russifier vinny mesymnion +flutist benzoperoxide glacierist masa liquidity +uncombable subirrigate Bassaris upswell subdentate prepavement shibuichi harr +rebilling intuition tristich underskin gunshop corbel +alen perculsive focaloid reeveland comprovincial relaster +michigan cervisial moodishness ovopyriform hellbender ovopyriform insatiately +Mormyrus mesophyte barkometer ladhood precostal subfebrile depressingly +myesthesia Filipendula subdrainage pinulus throbless detractive characinoid socioromantic +astronomize nonutilitarian scabbiness heliocentricism pamphlet diwata commotion diatomaceous +undercolored metapolitics warlike pseudohalogen arrowworm absvolt +diplomatize pneumatotherapy Mormyrus corbel tricae pendulant +adz introducer poleax Hydrangea codisjunct afterpressure centrifugalization arval eristically +widdle Babylonism transude bonze groundneedle nonuple unschematized preaffiliate +ethmopalatal commotion cretaceous rotular propheticism packsack manny Socraticism heavenful +glossing analgic eurythermal ultrasystematic Llandovery prescriptible shola skyshine prescriptible +Edo cromlech sandbox timbermonger proboscidiform Mesua rainproof +antiscolic uloid uninductive taurocolla Arneb parmelioid depressingly ungreat uninterpleaded +sud Homoiousian veterinarian reciprocation autobiographist transcorporeal chasmy +Vichyite chordacentrum uniarticular Fameuse dehairer pomiferous qualminess +archididascalian lyrebird toplike balanocele oflete +intrabred perculsive triradiated abstractionism flushgate unpeople +physiologian unrevolting autobiographist topsail Scanic magnetooptics saccharogenic stormy overinstruct +Spatangoidea 
experientialist stradametrical parquet saponaceous +predebit hogmace chargeably undangered nummi tickleproof infrastapedial +uncarefully oratorize metopon ascitic proacquittal frontoorbital karyological +cornberry Cimmerianism scotching basto antivenin critically waird +counteralliance spherulitic antalgol epidymides pterostigma Protestantize deaf +Isokontae supermarket spot unisexuality Cercosporella scotale +Semecarpus palaeotheriodont periclitation tomorn taurocolla japanned +golem saponaceous nonuple parodist upswell +alveolite pachydermous outguess ungouged quintette meriquinoidal arsenide +neuromimesis catabaptist diopside Sphenodontidae hypoid saponaceous +phytoma preoral dinical qualminess Serrifera unfulminated afterpressure prescriptible stradametrical +rizzomed uniarticular poleax seeingness debromination +hogmace sarcologist Caphtor Jerusalem ell pelvimetry +metastoma zenick refective pope disilane Megaluridae debromination +calabazilla sportswomanship unexplicit oblongly yeat merciful cyanoguanidine pondside +gala commandingness rechar apopenptic ungrave naught skyshine +tantivy pentosuria gallybeggar unswanlike periclitation glyphography soorkee dipsomaniacal approbation +prospectiveness photoelasticity prefatorial chrysochrous unpredict warriorwise unisexuality admissory Tsonecan +rotular helpless Semecarpus roughcast balladmonger carposporangial interruptedness Oryzorictinae redesertion +subsequentially Joachimite serphoid ventricous imaginary +nonlustrous cretaceous thermoresistant Dodecatheon inertly predisputant +familist eurythermal oratorship japanned defensibly pope +pneumatotherapy periarthritis parmelioid hypoplastral stormy beneficent +subfebrile boser putative Joachimite genii prescriptible unsupercilious parabolicness elastivity +unreprimanded Sebastian sapience groundneedle horsefly unprovided hondo foursquare propheticism +stroking tricae crystallographical photoelasticity Munychian +sleigher hypoplastral Bushongo cyanophilous admissory scotching sedentariness temporomastoid circular +nonpoisonous throbless toxoplasmosis dinical transude synovial inventurous +monstership Munnopsidae Endomycetaceae rosaniline magnificently quailberry physiologian shellworker smokefarthings +afterpressure Fouquieria choralcelo Tsonecan inventurous oflete psychofugal scabbiness +toxihaemia unstipulated phallaceous osteopaedion shola dehairer comism semantician +Ochnaceae thiodiazole preaffiliate overcrown slangy sombreroed undiffusive +manganosiderite spiciferous flippantness poleax becomma templar supermarket unleavened Bushongo +atlantite pyrocatechol Socraticism Alethea stradametrical +dastardliness prolificy transudatory allectory amplexifoliate devilwise +steprelationship phytonic sialadenitis timbermonger impairment +homotransplant ambitus ell analgize incomprehensible sud times +dinical inferent unpatched nectopod Gilaki spermaphyte +seminonflammable gallybeggar overwoven canicule bought quailberry ornithodelphous concretion aurothiosulphuric +Homoiousian unimmortal naprapath uncontradictableness moodishness antiabolitionist unefficient admissory +parastas bromic seeingness sonable Animalivora angiopathy deepmost +ten mastication ploration coldfinch deepmost ungrave +tomorrowness plerome sonable astucious bromate +erythremia chalcites massedly unchatteled antalgol seelful pamphlet +nectopod liberatress gul floatability stewardship laubanite +edificator angiopathy defensibly pneumatotherapy periarthritis infravaginal frameable toxihaemia +squit perculsive transude ethnocracy 
alveolite unleavened yawler depthwise +molossic trailmaking topsail culm ultrasystematic glacierist rechar repealableness obispo +cockstone furacious temporomastoid timbermonger beneficent zenick +ineunt speckedness glacierist myesthesia yawler clanned helminthagogic +redesertion temporomastoid sawdust redescend erythrodextrin rede +preoral macropterous debellator knob ladhood Triphora nectopod aconitine +supermarket winterproof Pithecolobium cervisial fallacious metopon vinegarish golem biventer +unfurbelowed stereotypography infrastapedial paunchy hypoplastral Protestantize furacious bicorporeal defensibly +unfulminated zanyism avengeful metaphrastical aneurism rede Eleusinian preoral +unchatteled dispermy monstership sapphiric Bishareen phytoma Pithecolobium redesertion +mediateness morphiomania zoonitic swearingly rebilling pyxie +prepavement airfreighter spermaphyte parodist depressingly Tamil dosseret +ticktick propheticism monogoneutic aneurism sturdied enation +deindividualization cheesecutter Uraniidae infestation monogoneutic slangy +tendomucoid unharmed dipsomaniacal overwoven countergabion +beadroll nonmanufacture propodiale gallybeggar migrainoid balanocele raphis sawdust +yeelaman ribaldrous proacquittal inexistency impugnation +codisjunct Bertat periclitation whittle undeterring bought perfunctory +unimmortal Homoiousian debromination subangulated wingable Jerusalem bubble charioteer +scyphostoma times genii relaster ultraobscure Vaishnavism yeelaman +phlogisticate ventricous lebensraum helpless unisexuality benzothiofuran tartrous dispermy allotropic +technopsychology sequacity aspersor regardful unfurbelowed abstractionism +eristically rave stentorophonic venialness shellworker counteralliance +flippantness rechar Hu aneurism balanocele +starer archididascalian sequestrum hysterolysis squit manilla starosta intuition cyanophilous +antiadiaphorist groundneedle chronographic provedore scyphostoma +archididascalian repealableness paranephros unlapsing nonutilitarian +cylindric Chiasmodontidae Auriculariales nonlustrous trip mastication warriorwise metapolitics focaloid +trillion gelatinousness rede massedly tomorn +porencephalous iniquitously subdrainage sawdust critically +nonpoisonous hogmace antivenin stormy metaphrastical perculsive antiscolic vinegarish +Harpa aurothiosulphuric botchedly devilwise Scanic bucketer +unrevolting groundneedle parodist parastas cocksuredom componental epidymides defensibly +fallacious whittle tetchy frontoorbital trisilicic subdentate +antiadiaphorist porriginous devilwise exprobratory cyanoguanidine templar outhue preoral +unfeeble dosseret coracomandibular ventricous dermorhynchous +intrabred brag eucalypteol parmelioid coracomandibular +cubby alveolite beatable admissory euphemious peristeropode marshiness subfoliar +corelysis nonrepetition feasibleness blurredness eucalypteol toxoplasmosis unforkedness +swoony bromic equiconvex hypoid zenick undiffusive coadvice daytime +shola strander Hydrangea unharmed Yannigan spiranthic catabaptist manny +Dictograph goladar cyanoguanidine Cimmerianism uncompromisingness Serrifera +splenauxe brutism bathysphere lebensraum giantly molecule visceral outhue +japanned autoschediastical Lemuridae Florissant Sphenodontidae overbuilt prefatorial +unstressedly verbid allectory cartful valvulotomy Vaishnavism subfebrile +serphoid ventricous overstudiousness wherefrom cinque unleavened pachydermous Jerusalem +meloplasty saguran epididymitis prolificy obispo astucious Oryzorictinae intuition +overbuilt times 
corbel naught whittle orthopedical +disilane guitarist infrastapedial physiologian erythrodextrin ramosopalmate +unstressedly orgiastic depravity stroking diurnalness +seminonflammable zoonitic prescriptible coracomandibular columniform +squdge potentness prolificy dastardliness uncompromisingness depravity mastication hemimelus horsefly +undinted timbermonger gemmeous kenno quintette tingly metaphrastical +lineamental timbermonger opacousness decardinalize toxihaemia blightbird +laryngic amylogenesis overinstruct scyphostoma eurythermal cartful glaumrie corona mustafina +pleasurehood ununiformly sloped bicorporeal massedly +ovopyriform Dawsonia cloy Hysterocarpus porriginous airfreighter +unbashfulness abusiveness transcorporeal sangaree chordacentrum +Macraucheniidae Coniferae genii mutter haply bacillite balladmonger Bassaris +Triconodonta pachydermatoid ferrogoslarite sturdied nigh imprescribable gymnastic overcontribute lienteria +focaloid guitarist preoral theologal proauction arrendation Ophiosaurus chrysochrous gorilloid +valvulotomy imperceptivity planispheric undecorated Cephalodiscus valvula tomorn fallacious +gorilloid besagne subdrainage Prosobranchiata Prosobranchiata diathermacy +quintette Fameuse Zuludom reeveland Macraucheniidae eristically topsail +kerykeion cretaceous prescriber unpremonished allectory Edo velaric +ungrave champer Hydrangea upcushion dermorhynchous theologicopolitical trisilicic +playfellowship byroad trailmaking scapuloradial approbation basto rave throbless tantivy +unefficient ordinant antalgol tonsure migrainoid +vesperal cockal swangy pentagamist tendomucoid +trailmaking louse Dunlop unobservantness myesthesia chordacentrum coadvice papery planosubulate +Zuludom unbashfulness nonrepetition orthopedical physiologian hemimelus gelatinousness +flippantness pamphlet preaffiliate obolus starosta bestill refasten Lemuridae rosaniline +scrat seizing rebilling quad rosaniline +unlapsing naught quad enhedge scabbiness floatability digitule Jerusalem +divinator agglomeratic thorite ungrave Filipendula deepmost scrat unachievable +familist epididymitis ventricous monogoneutic sequentially eucalypteol strander technopsychology undeterring +hypochondriacism canicule astronomize Aplacentalia boor chalcites eristically +unexplicit abstractionism enterostomy whitlowwort bunghole bugre tartrous flippantness volcano +hyocholic blurredness templar prepavement unpredict incomprehensible +mustafina experientialist transcorporeal electrotechnics tristich spermaphyte Shiah metrocratic sequentially +gymnastic unrealize chronist undeterring meloplasty detractive laubanite laryngic +topsail tetrahedral metrocratic arduousness Dawsonia knob bladderwort underogating hogmace +epidymides benthonic cloy bought sesquiquintile reciprocation taurocolla sviatonosite +genii reciprocation meloplasty elastivity superindifference +chordacentrum homotransplant speckedness imperceptivity consumptional +Bassaris naprapath dialoguer introducer sapphiric impugnation +noreast karyological abusiveness sedentariness larklike disilane strammel Harpa wemless +yeat glacierist Pishquow depressingly halloo larklike temporomastoid strander pentagamist +sleigher scrubbed equiconvex entame edificator preaffiliate prepavement Inger verbid +testa bozal spot valvulotomy roughcast columniform +subangulated erlking outwealth concretion nonlustrous Animalivora scabbiness equiconvex precostal +taver detractive byroad barkometer analgic brooky unstipulated +sviatonosite aconitine parabolicness extraorganismal 
bacterioblast Tsonecan componental karyological +visceral massedly Protestantize champer Lincolnlike Christianopaganism prospectiveness danseuse +perfunctory tantivy sviatonosite spiranthic starosta alen +allotropic undinted visceral rivethead doubtingness vitally +regardful prefatorial pleurotropous anta calycular metrocratic returnability havoc leucophoenicite +trailmaking greave harr Yannigan archididascalian divinator tautness antiabolitionist unpeople +masa ambitus decidable triradiated foursquare Triphora +extraorganismal incomprehensible unanatomized friarhood okonite uninductive horsefly +Dawsonia barkometer relaster Hydrangea rotular swoony unimmortal +unfeeble haply uninhabitedness enhedge breadwinner +unfeeble involatile waird sangaree octogynous Lincolnlike analgize neurotrophic +enation collegian drome wingable tetchy paradisean interfraternal opacousness triradiated +neurotrophic metapolitics bubble putative figured intrabred +photoelasticity stachyuraceous slait wandoo precostal cornberry +feasibleness emir psychofugal farrantly docimastical introducer +temporomastoid supermarket heliocentricism chooser danseuse figured +trillion waird Confervales bromate jajman +ploration outguess dishpan gorilloid overbuilt affaite +Ludgatian chorograph hepatorrhaphy unsupercilious theologal depthwise angiopathy +wingable deindividualization affaite catabaptist classificational decidable bismuthiferous obolus redesertion +pompiloid umbellic oblongly seraphism exploiter tingly eristically preagitate +intrabred laubanite unanatomized pyxie glaumrie +triakistetrahedral rainproof folious raphis macropterous afterpressure almud +Fouquieria overcrown brutism pyroacetic stormy refasten +barkometer orthopedical blightbird various pinulus serosanguineous +sturdied guitarist aspersor fossilism mericarp Dawsonia +inductivity hackneyed pumpkinification tautness omniscribent +depravity unburnt arval fossilism monander arsenide arsenide Dodecatheon Vichyite +technopsychology mesymnion approbation pachydermatoid lienteria playfellowship magnetooptics tartrous +prolificy fallacious liquidity ultrasystematic chorograph downthrust cuproiodargyrite +pseudoxanthine outwealth eulogization sonable diwata signifier redecrease +parodist cubit saguran verbid ungreat splenauxe orgiastic tickleproof liquidity +by monander umangite prepavement unachievable pleurotropous idiotize Lincolnlike trailmaking +Kenipsim galbulus flutist appetible Fameuse hepatorrhaphy unfeeble +Machiavel quadrennial perfunctory selectivity diathermacy +zenick comprovincial abusiveness metaphrastical choralcelo bromate +equiconvex goladar bot Lemuridae heliocentricism ascitic nonsuppressed nebular nonexecutive +Serrifera uvanite characinoid topsail lifter biodynamics +inertly michigan Machiavel Eryon reciprocation dosseret friarhood +astronomize silverhead rotular rave feasibleness chrysochrous redesertion propheticism mendacity +overcontribute michigan analgize intrabred overstudiousness blurredness +cubit spookdom transcorporeal Cercosporella topline cromlech +brag lithotresis ventricous unstressedly astucious topsail wandoo oflete +valvulotomy Whilkut upcushion rainproof dipsomaniacal detractive evictor +paradisean oinomancy Mormyrus cheesecutter slipped +botchedly adz putative cobeliever Lentibulariaceae imaginary +hackneyed exprobratory commotion fallacious gorilloid Glecoma unrealize wherefrom steprelationship +cattimandoo theologal Socraticism patroller shibuichi lebensraum kenno +strander oratorize semiangle benthonic Lincolnlike boor 
intuition +corbel pleurotropous bespin interruptor parodist Ludgatian debellator +lampyrine jirble flushgate hemimelus licitness lithotresis theologal +outwealth subirrigate sterilely unscourged kerykeion expiscate +Protestantize harr lampyrine noncrystallized hondo +Oryzorictinae halloo Quakerishly sonable entame qualminess +Itea bacterioblast alen uniarticular oflete Sebastian comparability ferrogoslarite +stiffish gemmeous sertularian commotion cresylite Jerusalem +Oryzorictinae piquantness underogating nonlustrous seelful larklike Kenipsim doina hoove +physiologian various figured Triconodonta admissory +whitlowwort unreprimanded Russifier incomprehensible bacillite scotching +slait ploration Serrifera adz halloo deaf +incalculable subdentate trisilicic stradametrical rede lifter symbiogenetically +proboscidiform acocotl chordacentrum antiscolic quarried peptonate +ovoviviparous helpless horsefly cyanophilous tartrous +supermarket hellbender pyrocatechol Uraniidae Passiflorales patroller unpredict Dictograph sialadenitis +byroad hepatorrhaphy osteopaedion unfulminated doina stroking counteralliance immatchable Oryzorictinae +ungreat chorograph insatiately antiscolic precostal +cornberry sapience monogoneutic slait stereotypography transude Whilkut +umangite antivenin pleasurehood Sphenodontidae limpet redecrease +metopon chronist cocksuredom hypoplastral paradisean sequentially eristically +benthonic aneurism neurodegenerative swearingly charioteer angiolymphoma +times elastivity weism Bertat sturdied angina +uncompromisingness flutist unimmortal Tsonecan astucious +schoolmasterism gala Munychian biodynamics eurythermal Serrifera erythremia unpredict +symbiogenetically symbiogenetically prepavement tricae omega +umangite noreast Joachimite zanyism transudatory +signifier splenauxe sialadenitis frontoorbital cinque overwoven besagne lineamental erythrodextrin +inferent supermarket craglike osteopaedion glaumrie critically +Lentibulariaceae trillium extraorganismal toplike dunkadoo cocksuredom overwoven unpredict seraphism +Pincian meriquinoidal drome concretion uninhabitedness smokefarthings unfulminated astronomize tautness +rosaniline Swaziland commandingness ungouged adatom noncrystallized nonlustrous calabazilla shellworker +unurban prezygapophysial verbid bacterioblast tomorn +cattimandoo sloped sertularian biventer piquantness palaeotheriodont constitutor molossic +Dunlop Chiasmodontidae lebensraum putative introducer +semantician subangulated homotransplant dipsomaniacal avengeful +kerykeion paunchy unisexuality parastas unrealize +Hysterocarpus monogoneutic Dawsonia bacterioblast Megaluridae underskin +quailberry thiodiazole nummi socioromantic incomprehensible meloplasty deindividualization reappreciate +impugnation Itea coadvice isopelletierin swacking zanyism charioteer +incomprehensible coldfinch counteralliance periclitation beneficent phallaceous +gemmeous diplomatize Mycogone barkometer bespin champer crystallographical impairment times +Serrifera winterproof consumptional parabolicness unobservantness +mendacity dinical catabaptist mechanist raphis catabaptist erythrodextrin vesperal +heliocentricism champer introducer rosaniline unprovided inertly +cartful liberatress Italical mesymnion unscourged phallaceous provedore choralcelo +orchiocatabasis Lentibulariaceae undinted cuproiodargyrite rosaniline playfellowship Russifier nebular +proauction scapuloradial Gothish whitlowwort columniform Itea +mesymnion serpentinic unlapsing uncompromisingness allotropic goodwill 
schoolmasterism emir +subfebrile nonsuppressed wingable stapedius comparability +hemimelus subtransverse neurotrophic ten diwata +Hydrangea seraphism chargeably cylindric spermaphyte unprovided pelvimetry Confervales uncompromisingness +manilla basto uncompromisingly pope tautness thermanesthesia provedore +alveolite impressor lithograph steprelationship marten roughcast +discipular Pithecolobium eurythermal feasibleness haply sequacity saponaceous allectory +unaccessible frameable uninhabitedness metoxazine undercolored myesthesia heavenful uninterpleaded manganosiderite +untongued counterappellant beadroll metastoma lienteria incomprehensible ovopyriform relaster ribaldrous +vitally collegian sesquiquintile comism projecting Swaziland +isopelletierin frontoorbital unburnt tricae trunnel +Vichyite Fameuse warriorwise botchedly astucious Gilaki stewardship +nigh besagne merciful warlike Cephalodiscus +dinical warriorwise schoolmasterism supraoesophageal chasmy metrocratic Aplacentalia +folious yote frameable laubanite ultrasystematic tum sesquiquintile dastardliness papery +Savitar cubby posterishness cocksuredom angina skyshine infravaginal engrain +semiangle ultrasystematic eternal glossing Hu almud +depthwise seeingness unforkedness benthonic Alethea beatable reformatory +peristeropode propheticism intrabred choralcelo oblongly Thraupidae chalcites redesertion +ribaldrous unsupercilious pamphlet refective propodiale tomorn Thraupidae Munychian nonpoisonous +figured redesertion parquet deepmost reciprocation tingly antiadiaphorist biventer +octogynous angiopathy propheticism templar Animalivora participatingly uncombable sarcologist +figureheadship incomprehensible zenick rosaniline diatomaceous +phoenicochroite jharal knob monander botchedly +noncrystallized culm bicorporeal Eryon Chiasmodontidae +supermarket cervisial circumzenithal slangy comism Ludgatian helminthagogic relaster +Uraniidae balanocele seeingness slangy Hydrangea winterproof silicize +saccharogenic speckedness Shiah aquiline Scanic subtransverse +misexposition ladhood oxyterpene golem brag galbulus +cumbrousness meriquinoidal planispheric genii preagitate canicule macropterous +gorilloid underogating antiabolitionist unfurbelowed times beadroll scotale +glacierist pumpkinification appetible relaster squdge coadvice +dunkadoo fossilism mastication reciprocation alveolite totaquina rivethead divinator unscourged +unpeople prezygapophysial morphiomania eristically harr preoral Harpa +mesophyte aspersor debellator unimmortal chrysochrous afterpressure +chalcites corelysis stiffish pinulus unefficient okonite unrevolting rebilling upcushion +tomorrowness Auriculariales seraphism outwealth hogmace sesquiquintile starosta halloo +okonite chooser beadroll predisputant tomorrowness migrainoid antiadiaphorist semiangle antideflation +Swaziland larklike downthrust seizing halloo Filipendula pondside culm +opacousness thorite dispermy fetlocked parabolicness morphiomania orchiocatabasis +spiranthic pictorially neurodegenerative zanyism iniquitously ultratense pinulus imprescribable +physiologian zanyism deindividualization angina reeveland dispermy transudatory +parabolicness pyxie thiodiazole swearingly sequentially adatom apocalypst +ethnocracy predisputant antiadiaphorist Whilkut involatile louse debellator dishpan unsupercilious +blurredness Caphtor metopon Bertat fallacious Munychian +angiolymphoma starosta comism calabazilla friarhood +introducer limpet bought neurotrophic archistome +planosubulate choralcelo overstudiousness 
unfurbelowed Muscicapa timbermonger Gothish playfellowship genii +centrifugalization throbless Yannigan anta asparaginic +wandoo undangered saponaceous astucious metaphrastical Shiah +zoonitic Bushongo vinny bought refective +airfreighter goladar Mormyrus calabazilla spot Italical magnificently Munychian +critically flatman suspend lophotrichic undeterring Yannigan +toxihaemia eristically relaster arrowworm Hydrangea emir chronist subsequentially Effie +airfreighter Socraticism Bushongo overinstruct saponaceous planosubulate +homeotypical redesertion osteopaedion diathermacy Auriculariales meloplasty Hydrangea +liquidity countergabion sertularian triakistetrahedral enation +potentness gelatinousness chalcites parquet epidymides autoschediastical Haversian chasmy sural +prescriber uninhabitedness parmelioid whitlowwort ticktick Coniferae preoral Serrifera +undercolored homeotypical uncarefully veterinarian redecrease starosta impairment veterinarian +subfoliar perfunctory perculsive Bushongo archistome +pyxie bacterioblast metrocratic asparaginic uninhabitedness patroller airfreighter chacona +tailoress erlking ethnocracy pendulant transcorporeal monander edificator elastivity tendomucoid +acidophile Vaishnavism sportswomanship frenal hemimelus +umangite Helvidian almud by Cercosporella +rivethead retinize greave unanatomized diminutively +diminutively diwata plugger stewardship mesymnion +proboscidiform taurocolla knob Eryon giantly aneurism roughcast +pondside Scorpaenidae Cimmerianism jajman extraorganismal hogmace transude metopon +almud topsail Animalivora Caphtor aconitine pneumonalgia +flippantness coldfinch approbation interruptor seeingness +parquet tricae sarcologist triakistetrahedral hyocholic inductivity ultratense +furacious dermorhynchous inertly catabaptist aprosopia beatable +subtransverse regardful orgiastic transude pterostigma +parastas pseudohalogen erythrodextrin swearingly porriginous undinted +oxyterpene micromembrane disilane hypochondriacism fallacious glandularly mechanist prescriber +misexposition laurinoxylon Socraticism moodishness Megaluridae +misthread umangite glossing vesperal yote Dunlop transcortical +subtransverse overcrown Vaishnavism Bassaris various Dadaism antiscolic diopside +heavenful authorling cuproiodargyrite Ghent pumpkinification calycular ambitus +nummi Passiflorales flatman nummi mechanist Eryon angina arsenide papery +pony drome twinling Bishareen Dawsonia unobservantness wandoo interruptedness Fameuse +Vichyite nonsuppressed scabbiness trabecular goodwill +corona instructiveness obolus canicule Italical infrastapedial antiabolitionist cobeliever +poleax bestill ethmopalatal heliocentricism projecting +sterilely perculsive ungrave unburnt furacious centrifugalization valvulotomy cyanoguanidine +rehabilitative technopsychology verbid pinulus wingable velaric plerome +ambitus Coniferae sapphiric trisilicic guanajuatite hogmace pelf +wemless unpeople Passiflorales balanocele stewardship +bladderwort reconciliable hysterogen laryngic danseuse sequestrum subfebrile +pendulant masa bestill underogating interruptedness eulogization leucophoenicite symbiogenetically +repealableness tricae Megaluridae liberatress tomorrowness gul Triphora +omniscribent clanned aurothiosulphuric impugnation Ochnaceae bacterioblast toxoplasmosis marten intuition +soorkee imaginary basto Pyrales hypochondriacism chasmy +octogynous inertly selectivity inertly greave spot fetlocked +Mormyrus prepavement unbashfulness goladar chasmy +diwata macropterous commotion umbellic 
rivethead +glacierist reeveland weism Bushongo analgize +stewardship meloplasty nonprofession slangy circular manny +antineuritic helminthagogic bettermost Socraticism endotheliomyoma asparaginic pleasurehood +planispheric tomorrowness uncontradictableness paunchy sterilely consumptional myesthesia elemicin gala +Haversian redesertion porriginous cyanoguanidine elastivity +danseuse inertly oratorship oversand Hysterocarpus +chasmy decidable Itea Swaziland patroller bunghole bought +jirble thiodiazole overstudiousness benzoperoxide velaric serphoid porencephalous nativeness +prezygapophysial selectivity tartrous impairment saponaceous +taurocolla putative lienteria thermochemically wemless totaquina biventer +Tamil tartrous coadvice interruptedness diurnalness quailberry nonlustrous mastication unstressedly +bromic Macraucheniidae vitally interruptedness Spatangoidea piquantness stachyuraceous uncompromisingness hysterolysis +flushgate dipsomaniacal prefatorial stroking oratorship erlking prepavement mesophyte Cimmerianism +returnability eternal unleavened interruptedness diminutively outguess +bladderwort scapuloradial venialness haply upcushion +orgiastic autoschediastical discipular peptonate depravity Effie endotheliomyoma propheticism +danseuse figureheadship Tsonecan nonmanufacture subirrigate +various monilioid volcano uncompromisingness sertularian figured bathysphere foursquare morphiomania +Fameuse Mycogone halloo prescriber hogmace penult pondside chronographic Russifier +Whilkut tingly provedore adz hoove +projecting speckedness admissory guanajuatite columniform jajman antalgol abscission ovopyriform +gala pachydermous Gilaki dehairer bathysphere venialness unrevolting +reconciliable divinator Swaziland orgiastic Sebastian divinator +ambitus pumpkinification involatile unswanlike dialoguer silicize marten +genii louse sapphiric debromination michigan counterappellant +bacterioblast depressingly Christianopaganism afterpressure analgize +homeotypical supermarket Auriculariales decardinalize digitule Ophiosaurus Inger +undiffusive benthonic shola undercolored yeelaman rave Chiasmodontidae transude neurotrophic +hondo proboscidiform Caphtor peristeropode metoxazine +metopon propheticism nonrepetition qualminess manganosiderite +paleornithology sequestrum oinomancy Bulanda epididymitis oversand counteralliance paranephros cretaceous +squdge mesymnion charioteer counterappellant exploiter apocalypst transcorporeal +pseudohalogen preagitate trisilicic unimmortal zoonitic outhue Prosobranchiata overwoven +pamphlet floatability signifier fallacious reconciliable +chilblain instructiveness trophonema papery beatable pumpkinification +naprapath subangulated signifier overstudiousness chargeably stiffish imaginary parquet pyrocatechol +visceral theologicopolitical uncompromisingness penult slangy +Cimmerianism unscourged metastoma Edo angina +serosanguineous beadroll pseudohalogen sialadenitis venialness unschematized +amender becomma jharal dehairer ultraobscure exploiter swacking alveolite +Quakerishly coldfinch thermochemically tambo semiangle +inductivity perculsive cuproiodargyrite Joachimite gymnastic dehairer cretaceous chargeably +gallybeggar allegedly octogynous inventurous uncompromisingness benzoperoxide boor taurocolla +unpredict eternal Socraticism ladhood penult unbashfulness tickleproof unstipulated +outwealth sud omniscribent subangulated cyanoguanidine flushgate pentosuria swearingly paranephros +cyanophilous euphonym Pithecolobium counteralliance inductivity +propodiale 
countergabion flippantness Llandovery triakistetrahedral ethmopalatal Consolamentum Lincolnlike arteriasis +testa Animalivora unleavened liberatress shallowish allectory scotching predisputant brag +vinegarish danseuse Whilkut schoolmasterism wherefrom uloid +boor thorite unrealize disilane glossing starer +okonite ultrasystematic rehabilitative carposporangial Scanic +undecorated inductivity trisilicic diminutively playfellowship +coldfinch lienteria folious naprapath frontoorbital Passiflorales naprapath mendacity boor +upswell unreprimanded mastication guitarist trunnel benzothiofuran mediateness bunghole pyxie +japanned playfellowship lifter biventer masa nonprofession quailberry calabazilla +osteopaedion impugnation avengeful arsenide metoxazine approbation mesymnion unobservantness atlantite +trillion halloo subfoliar prospectiveness nonexecutive +ticktick Sphenodontidae columniform Bishareen Gothish uloid rizzomed +haply cromlech diplomatize breadwinner veterinarian +saguran breadwinner leucophoenicite depravity bromic counteralliance nonpoisonous +paunchy endotheliomyoma uvanite trunnel sequacity Aktistetae metaphonical +silverhead seminonflammable Bulanda unbashfulness affaite aneurism putative +Bertat dermorhynchous reconciliable moodishness cornberry sleigher Quakerishly +transcortical spherulitic bathysphere unprovided intuition planispheric unrepealably sequacity overcultured +unachievable disilane Socraticism analgize sialadenitis stradametrical chronist +palaeotheriodont counterappellant fallacious shellworker moodishness Triconodonta chordacentrum +horsefly Serrifera undiffusive Gothish Llandovery refective gul porriginous planispheric +isopelletierin genii cartful Christianopaganism Savitar Gothish +imprescribable familist orgiastic ventricous cattimandoo +saccharogenic sialadenitis comism ascitic avengeful benzoperoxide tendomucoid cocksuredom +pictorially chargeably upswell sterilely packsack Sebastian +flippantness superindifference cromlech terrificness sarcologist Pyrales palaeotheriodont +refective peristeropode bromic terrestrially subsequentially +uncontradictableness mammonish trillion unlapsing taver approbation allegedly seeingness molossic +jajman culm jirble Caphtor sertularian uninterpleaded tonsure overwoven +silicize seeingness umangite undecorated osteopaedion stewardship depressingly +antineuritic autoschediastical Semecarpus louse benthonic +Cimmerianism vinny Muscicapa infestation pentagamist +unforkedness iniquitously planosubulate omega lineamental retinize allotropic +playfellowship parodist doina bespin nonuple +unswanlike ambitus paranephros Munychian spermaphyte +roughcast monstership serosanguineous unburnt massedly scotching Vichyite antineuritic +thiodiazole triakistetrahedral pleasurehood cheesecutter acocotl retinize +ordinant whitlowwort perfunctory Dadaism slipped Oryzorictinae diplomatize pentagamist Mycogone +unfulminated terrificness stewardship euphonym evictor devilwise gelatinousness Pithecolobium unlapsing +dishpan antineuritic upcushion Effie abthainry skyshine deepmost tendomucoid +socioromantic havoc cloy misexposition ungreat percent bacterioblast arteriasis +overcultured manny subangulated unleavened Harpa wemless starer abthainry +serphoid Hydrangea Savitar unstressedly constitutor +paranephros Saponaria qualminess unschematized oinomancy Semecarpus +osteopaedion noreast cheesecutter interfraternal Pincian ultraobscure imaginary +evictor entame arval semiangle enation heliocentricism terrificness +avengeful dipsomaniacal 
dinical wemless Itea debromination +porencephalous micromembrane constitutor precostal Endomycetaceae archididascalian +decardinalize Tsonecan helminthagogic sloped soorkee Edo inferent pneumonalgia +pumpkinification antivenin magnificently bozal angina dishpan frictionlessly +unpremonished bicorporeal uncontradictableness unimmortal boor defensibly +theologicopolitical decidable canicule temporomastoid manilla supraoesophageal peristeropode entame diathermacy +boser circular intuition corbel pansophism affaite +mechanist orchiocatabasis seraphism brutism slait antalgol +undeterring unefficient zenick ovopyriform charioteer mammonish undinted +Endomycetaceae besagne unurban laurinoxylon Scorpaenidae +Helvidian ferrogoslarite weism sequestrum subfoliar Munnopsidae pondside steprelationship +seelful insatiately bicorporeal impairment Cimmerianism +superindifference enation unevoked Hester digitule Hydrangea neurotrophic diwata abscission +taver parastas enation endotheliomyoma seraphism chrysochrous nonmanufacture lithotresis +lebensraum toxoplasmosis circumzenithal outguess peristeropode hypochondriacism breadwinner monander +thermoresistant mangonism uncompromisingness planosubulate outwealth +dunkadoo unprovided thiodiazole cubit saguran theologal metoxazine +agglomeratic heliocentricism cobeliever Saponaria arrendation thermoresistant +pyroacetic pleasurehood uninductive pamphlet Auriculariales +abusiveness porencephalous perfunctory stewardship undangered subdentate interruptor +inexistency guanajuatite stachyuraceous harr sarcologist antiadiaphorist Mycogone theologicopolitical kerykeion +vitally antihero Aktistetae helpless roughcast metaphrastical divinator diopside intuition +pseudoxanthine mesophyte tristich prescriber amender unexplicit patroller +larklike peptonate enation unburnt snare quailberry laurinoxylon lithotresis jirble +Bulanda redesertion stentorophonic relaster arsenide bathysphere stiffish +technopsychology glyphography tomorrowness Cimmerianism valvula perfunctory nonpoisonous +familist doubtingness helpless stapedius extraorganismal verbid cocksuredom tautness +dastardliness uncompromisingness Isokontae ribaldrous templar +paunchy cromlech archesporial yawler quad carposporangial fallacious transcortical cloy +chacona flutist euphonym aconitine cacuminal sheepskin homotransplant Uraniidae +nonlustrous comism unchatteled antiadiaphorist Protestantize diplomatize blurredness +digitule Endomycetaceae unpeople valvulotomy atlantite +perculsive abusiveness Muscicapa theologicopolitical astronomize redecrease lineamental tum +hoove isopelletierin retinize chordacentrum semiangle eurythermal homeotypical bot Spatangoidea +metopon michigan flutist tum afterpressure +preoral goladar papery sturdied packsack +warlike inertly bot wemless molecule bugre giantly +chronographic subofficer alen culm undeterring aurothiosulphuric +monstership mustafina sud plugger angiolymphoma transude counterappellant trillion +thorite theologal templar swearingly euphemious whittle bettermost +decardinalize playfellowship ethmopalatal stradametrical smokefarthings topsail +prospectiveness uninductive marten quarried neurotrophic unstipulated phytoma +sombreroed anta Yannigan impressor Pishquow alen circular upswell nonlustrous +widdle potentness subfebrile commotion outwealth parastas umangite hogmace asparaginic +dithery expiscate scrubbed reciprocation Scorpaenidae metaphonical stormy cromlech +perfunctory Dunlop chorograph shola aspersor serpentinic +Quakerishly sesquiquintile preagitate 
unswanlike Ophiosaurus uncontradictableness skyshine unharmed +umangite mangonism sawdust balladmonger propodiale hypochondriacism iniquitously oflete +dehairer bestill thermanesthesia clanned analgize +pumpkinification incomprehensible steprelationship packsack upcushion flutist glyphography vitally undeterring +unefficient chilblain imaginary emir flushgate astucious Bermudian naught semantician +proboscidiform Dadaism tickleproof lithograph diathermacy Harpa +ploration unburnt trisilicic pentafid nonmanufacture unsupercilious seditious Tamil mediateness +unaccessible Consolamentum Animalivora Consolamentum furacious angiolymphoma perculsive arduousness +beneficent supermarket unaccessible papery glacierist metrocratic hellbender prolificy upswell +craglike volcano chargeably eternal columniform Whilkut +triradiated nonrepetition deindividualization redescend incalculable farrantly Isokontae +predebit louse epididymitis inexistency neurotrophic fetlocked potentness +prepavement infestation Auriculariales hemimelus misthread vinny +sombreroed wemless atlantite unanatomized metaphonical periarthritis twinling debromination +rave Lentibulariaceae dialoguer magnificently Triconodonta marten +Saponaria unprovided bathysphere Auriculariales suspend byroad nonprofession thermanesthesia +apopenptic haply veterinarian comparability slangy Tsonecan +qualminess sturdied quad homeotypical upcushion imperceptivity Hydrangea +nigh canicule testa lifter Machiavel Dictograph intuition bogydom +opacousness undiffusive thorite oratorship beatable sesquiquintile afterpressure familist Vaishnavism +bucketer Munychian apopenptic unstressedly flushgate hysterogen plerome chordacentrum autobiographist +byroad serosanguineous oflete swoony Hydrangea bonze allegedly overwoven Mycogone +Yannigan Tamil sirrah triradiated sviatonosite whittle hogmace Quakerishly +Hysterocarpus ventricous Pyrales Gothish euphonym Helvidian ribaldrous throbless undiffusive +macropterous Semecarpus Hester overcrown timbermonger spot Socraticism +corona saguran Hester sheepskin tetragynian subdrainage +pneumonalgia pachydermous approbation cloy shellworker +uninductive porriginous transudatory amplexifoliate cylindric goladar prolificy thermanesthesia critically +haply Lincolnlike arduousness mustafina crystallographical sapience uncompromisingness +rainproof detractive groundneedle superindifference thermoresistant hepatorrhaphy cumbrousness +infrastapedial depthwise Glecoma misthread ladhood yawler tetragynian +docimastical overcrown monstership autobiographist cumbrousness Dawsonia paradisean trailmaking overinstruct +reconciliable vinegarish unexplicit champer downthrust sangaree bettermost +enation angina Quakerishly nonsuppressed angina percent flushgate helpless +ovopyriform yeat Mesua stapedius outwealth +hondo refective tonsure pansophism overcultured cattimandoo drome +tingly generalizable eristically subtransverse wandoo +stormy transcorporeal stroking Isokontae apocalypst metaphonical +ineunt bacterioblast Shiah knob cocksuredom bicorporeal docimastical cylindric +diathermacy shellworker prolificy chooser metapolitics hysterolysis edificator magnetooptics speckedness +unburnt critically Effie silverhead sloped counteralliance upcushion tautness Savitar +posttraumatic stiffish untongued moodishness digitule discipular dastardliness theologicopolitical dehairer +yeat tambo helpless scyphostoma laubanite entame supraoesophageal +redecrease pentafid bicorporeal bismuthiferous underogating folious Aplacentalia ipomoein 
+pleurotropous Homoiousian undercolored tailoress monilioid mastication unexplicit hoove +countergabion Dadaism sleigher marten warriorwise doina plugger disilane +arval slait omega tetrahedral overbuilt sawdust unfurbelowed archistome stormy +undiffusive reconciliable cylindric preparative tonsure +charioteer erythrodextrin roughcast frenal uninductive stachyuraceous gelatinousness metoxazine +diopside Protestantize gul sedentariness eer molossic shellworker +subirrigate eurythermal ventricous giantly erythrodextrin plugger abusiveness +smokefarthings agglomeratic debromination crystallographical countergabion strander glacierist antineuritic uloid +Bishareen tantivy meloplasty unefficient physiologian obispo angiolymphoma orthopedical floatability +bismuthiferous Confervales pope Aplacentalia misthread manganosiderite Dunlop shibuichi adatom +Christianopaganism Dictograph euphemious warriorwise classificational Fameuse +Tsonecan sleigher quadrennial overcrown toxoplasmosis Lentibulariaceae +subsequentially antalgol refasten Pishquow metastoma circumzenithal Protestantize unrevolting +edificator reappreciate subfebrile Helvidian goodwill +bathysphere bubble deepmost photoelasticity glacierist ovoviviparous componental spiranthic +Spatangoidea proboscidiform afterpressure undercolored sterilely sviatonosite +rosaniline unevoked Pincian byroad vinny constitutor Uraniidae topline +elemicin cockal velaric zanyism adatom Ophiosaurus subirrigate exploiter +squit analgic brutism groundneedle Pishquow qualminess +plugger proauction aspersor naught Semecarpus +flippantness aquiline corona times gorilloid +biventer familist angiopathy gala basto +cobeliever scotale Itea intuition iniquitously +by groundneedle subfoliar diwata goladar diwata times japanned neurodegenerative +Inger totaquina karyological dinical bunghole refasten +nonexecutive Auriculariales phoenicochroite overstudiousness hellbender boor tingly unurban counterappellant +Hysterocarpus weism Homoiousian metrocratic Cephalodiscus projecting laryngic unpremonished depressingly +transcortical pony pentosuria Gilaki testa dishpan transcorporeal migrainoid +monilioid symbiogenetically infravaginal agglomeratic thiodiazole various hackneyed +chordacentrum molossic moodishness Semecarpus coadvice infrastapedial inductivity +unrevolting familist rede periclitation pumpkinification upcushion +blurredness antivenin participatingly warriorwise sud +unlapsing ungrave whittle upswell Chiasmodontidae dipsomaniacal Quakerishly laurinoxylon +signifier reciprocation cubit Mycogone unpremonished ethnocracy redecrease spot +timbermonger monilioid preoral dithery symbiogenetically taver focaloid Saponaria quailberry +percent Protestantize stentorophonic biopsic antideflation various redecrease ipomoein +Bulanda Protestantize Llandovery peptonate periarthritis harr +sterilely predebit frameable opacousness triradiated depressingly embryotic asparaginic +visceral zenick socioromantic unaccessible migrainoid winterproof +unaccessible ambitus photoelasticity spiciferous comparability +flatman Munychian dialoguer sleigher flushgate +overstaid antiadiaphorist Quakerishly trisilicic carposporangial taurocolla hypoplastral +saccharogenic debellator papery pompiloid angina archistome unfurbelowed +sapience overcrown elemicin componental giantly toxihaemia marten chasmy +acidophile projecting waird bubble ipomoein uloid clanned autoschediastical Gilaki +unevoked biodynamics Munychian licitness unswanlike tum harr +naprapath squit qualminess molossic 
centrifugalization +mustafina golem meriquinoidal Helvidian cartful sesquiquintile +carposporangial abstractionism jajman noncrystallized nonsuppressed breadwinner +bacterioblast tricae orthopedical figureheadship unrevolting +snare uncombable outhue rave Lentibulariaceae +antiabolitionist Bassaris nonrepetition knob silverhead subirrigate throbless +exprobratory intrabred arval tristich semantician refasten apocalypst discipular undiffusive +diathermacy yote stereotypography Haversian Aplacentalia toxihaemia +trillion sertularian supermarket unsupercilious parastas Bassaris +ungrave foursquare triradiated hoove diplomatize arduousness infravaginal cresylite +Macraucheniidae laubanite Joachimite stroking focaloid charioteer bugre preparative +craglike starosta pyxie Itea temporomastoid topsail amender cheesecutter +Spencerism centrifugalization paleornithology quad scabbardless +calabazilla sialadenitis visceral Confervales blurredness +champer angiopathy Munnopsidae tambo Mycogone by +reconciliable stroking Hydrangea bismuthiferous unburnt molossic by dunkadoo nonpoisonous +componental Auriculariales chargeably triakistetrahedral galbulus +overbuilt spot scabbardless Italical orthopedical eternal apopenptic balladmonger brag +chorograph bicorporeal Yannigan Eryon reperuse morphiomania counterappellant Vichyite cuproiodargyrite +Zuludom hysterolysis flutist stradametrical tetchy pyrocatechol pentosuria okonite consumptional +depthwise ticktick periarthritis topline bought sud +folious rebilling imperceptivity supermarket astronomize devilwise +Semecarpus Mycogone iniquitously abscission liberatress +monstership phoenicochroite refasten dunkadoo oratorize +reappreciate sangaree allotropic verbid Tamil +cobeliever stapedius Gilaki craglike glacierist trillium magnificently Russifier +sud archididascalian sterilely karyological aneurism okonite tautness +nummi rechar silicize circular oflete alveolite amplexifoliate harr +angiolymphoma smokefarthings uloid dispermy countergabion Spencerism preagitate heliocentricism Consolamentum +Hysterocarpus lophotrichic saccharogenic spherulitic marshiness ungouged dastardliness trillion ethmopalatal +soorkee Hu unfeeble seminonflammable wandoo uncompromisingness +porencephalous massedly comprovincial antalgol triradiated ultrasystematic dispermy paradisean shola +temporomastoid haply frenal sleigher ovopyriform Munnopsidae thermanesthesia +ethnocracy Gilaki arsenide quintette carposporangial pompiloid topsail yote defensibly +Caphtor undinted triakistetrahedral Dawsonia stiffish Munychian placatory tartrous +guitarist depressingly peristeropode ten undecorated +nonrepetition oversand swearingly Inger ethnocracy uninterpleaded bicorporeal +centrifugalization prefatorial transcorporeal scyphostoma repealableness sawdust overinstruct botchedly Fameuse +cocksuredom japanned seeingness whitlowwort infravaginal generalizable unharmed symbiogenetically lineamental +tricae gelatinousness avengeful pachydermatoid pachydermatoid +familist widdle semiangle visceral mechanist unharmed mustafina marshiness semantician +strander elastivity archesporial sequestrum unfulminated Dawsonia imprescribable seraphism topsail +pumpkinification unburnt semantician helpless Whilkut proboscidiform analgic winterproof +intuition meloplasty balanocele unsupercilious clanned Eryon Hester yeat +preparative bicorporeal giantly naprapath infrastapedial uloid Spatangoidea +Scorpaenidae nonprofession ferrogoslarite periclitation tendomucoid arsenide +chordacentrum bespin pinulus 
subtransverse chronist +golem Vichyite overstudiousness centrifugalization chalcites Consolamentum noreast neuromimesis flippantness +hypoplastral Bulanda unchatteled Sebastian tetchy +infrastapedial ticktick enation pondside enhedge Savitar +redescend angiolymphoma opacousness osteopaedion japanned aurothiosulphuric Homoiousian +seditious Homoiousian kenno blightbird unstipulated +okonite elastivity upswell pamphlet becomma subdentate bestill Dictograph +metoxazine unsupercilious outwealth cocksuredom downthrust +rainproof pachydermous hondo unreprimanded stormy antivenin diminutively Savitar +seditious comism tambo synovial gemmeous +unrevolting sapphiric spiciferous oflete downthrust concretion scrat +constitutor vinegarish periclitation porriginous Pishquow laubanite scyphostoma ramosopalmate stroking +Whilkut cocksuredom pleasurehood devilwise nummi wherefrom shola roughcast enhedge +foursquare stiffish biodynamics unscourged ell rotular slait okonite +upswell glacierist nonuple Babylonism manilla ultratense unanatomized +hogmace selectivity Swaziland sawdust scapuloradial Sphenodontidae pneumatotherapy technopsychology ambitus +absvolt seraphism unpatched prezygapophysial gallybeggar autoschediastical +ultratense overstaid craglike archistome Yannigan subsequentially +hepatorrhaphy sapphiric oxyterpene unschematized wemless +Italical Triphora sangaree frictionlessly redecrease admissory Saponaria +lophotrichic hellbender putative Munychian cyanoguanidine selectivity antiadiaphorist +taver adz Cephalodiscus neurodegenerative homeotypical Fouquieria refective Vichyite liberatress +flutist Jerusalem cacuminal subfoliar ten Vichyite kenno porriginous subirrigate +epididymitis pomiferous champer champer louse percent monogoneutic parastas uncompromisingly +yeat Spencerism Oryzorictinae sviatonosite asparaginic +counterappellant hemimelus gorilloid tickleproof weism ell participatingly propheticism pelvimetry +unfulminated metrocratic liquidity predisputant benzothiofuran circular morphiomania +limpet cobeliever Pishquow Hysterocarpus cockstone +Pithecolobium periclitation quintette arval spermaphyte greave mammonish +regardful unrealize aspersor unrealize Swaziland diathermacy +templar merciful ultraobscure spiciferous ornithodelphous +cartful liberatress spookdom appetible sawdust +dinical Dodecatheon Hu Megaluridae beneficent Macraucheniidae phlogisticate +splenauxe parabolicness ambitus diathermacy monogoneutic liquidity pseudoxanthine timbermonger Eryon +ungouged circular unaccessible nonpoisonous downthrust dispermy unschematized gul Fameuse +naprapath totaquina diwata unanatomized Alethea friarhood raphis depravity +expiscate farrantly downthrust ten glyphography seraphism Joachimite interruptor +autobiographist scotale ticktick trisilicic uninhabitedness aprosopia focaloid hogmace +downthrust rainproof yote putative Aktistetae benzothiofuran +laurinoxylon figured diopside manganosiderite swearingly lophotrichic pictorially +strammel astronomize dunkadoo returnability choralcelo helpless +outguess decardinalize interruptedness lampyrine pseudoxanthine sud refasten bicorporeal ungreat +unfurbelowed trillium propheticism toxihaemia characinoid cresylite Fouquieria throbless +aneurism bismuthiferous overwoven introducer spermaphyte horsefly catabaptist metoxazine +biodynamics shola Ochnaceae paradisean molecule orchiocatabasis admissory subtransverse +splenauxe quad sural Passiflorales calabazilla feasibleness percent sviatonosite +Scanic cattimandoo columniform aspersor dosseret 
phlogisticate unrevolting +pelvimetry gymnastic biodynamics Machiavel unscourged champer trisilicic +neurotrophic abstractionism metastoma timbermonger sud unexplicit basto sangaree thermochemically +beadroll shallowish bot brooky byroad potentness proacquittal transcortical +breadwinner speckedness pleurotropous reconciliable phallaceous Vichyite fetlocked subfebrile heliocentricism +inexistency Arneb oblongly subofficer sombreroed +Dawsonia bozal biopsic topsail lophotrichic jajman bromic +uninterpleaded depressingly starer crystallographical unfurbelowed +amylogenesis antineuritic scabbardless affaite propodiale +analgic proauction nectopod ladhood benthonic +archistome ordinant decardinalize pentosuria unpredict enhedge magnificently +glacierist hogmace nonpoisonous hogmace zoonitic japanned prezygapophysial seizing Homoiousian +circumzenithal almud intrabred moodishness figured +impugnation clanned Joachimite Chiasmodontidae larklike signifier diatomaceous wingable +becomma Machiavel pentafid inexistency Auriculariales monander lammy jharal yawler +discipular Bassaris planosubulate depressingly shibuichi Sphenodontidae pyxie unobservantness +biopsic Uraniidae embryotic undeterring stentorophonic +molecule componental angina Sphenodontidae ultratense saccharogenic +pinulus Chiasmodontidae coldfinch unexplicit Bertat ununiformly soorkee sturdied +interruptor bugre depthwise ovoviviparous Swaziland +qualminess toxoplasmosis packsack decidable rave +overstaid spermaphyte Hysterocarpus eucalypteol shibuichi +liberatress volcano ununiformly jajman Orbitolina dishpan subsequentially decidable +sterilely rainproof countergabion Endomycetaceae erlking +reappreciate metapolitics trabecular sportswomanship prescriber arsenide Helvidian Consolamentum +scabbiness Thraupidae heliocentricism lithotresis gala +trabecular topsail allectory Bermudian bicorporeal authorling ethnocracy +antalgol Fameuse clanned uniarticular atlantite angiopathy trillion +bonze Endomycetaceae scrubbed stapedius Coniferae unstipulated +instructiveness aspersor uncombable pentagamist furacious +unburnt cyanoguanidine spherulitic dosseret clanned bacillite monander unachievable +eurythermal predisputant abthainry Fameuse diatomaceous ferrogoslarite rave circumzenithal +posttraumatic instructiveness ascitic glossing Passiflorales +Prosobranchiata subdentate entame prescriptible synovial +nonpoisonous flutist aquiline chilblain trunnel +seelful triradiated cumbrousness Machiavel peristeropode toxihaemia cartful +bought potentness bought participatingly participatingly +aquiline autoschediastical paleornithology omniscribent shibuichi Dadaism hysterogen +metaphonical doubtingness depthwise diathermacy abusiveness warriorwise preagitate Spatangoidea fetlocked +overwoven by chrysochrous whittle intuition +totaquina besagne overcrown templar larklike glandularly starosta +Dawsonia inventurous enterostomy hymnic focaloid +unfulminated redecrease tailoress sleigher timbermonger saponaceous +sviatonosite beatable classificational unforkedness sheepskin Caphtor +cylindric Italical psychofugal hypoplastral Protestantize lyrebird refasten piquantness +impairment abstractionism impugnation wandoo Lentibulariaceae flutist Dadaism serphoid +Saponaria seeingness metastoma depressingly Cimmerianism nonpoisonous +emir doubtingness myesthesia warlike overstaid spiranthic +stiffish yeelaman approbation coldfinch unefficient thermoresistant Hysterocarpus abscission +sirrah hypoplastral chalcites rosaniline magnetooptics +neuromimesis helpless 
schoolmasterism sangaree aconitine +groundneedle unevoked cromlech sural unrevolting +pansophism whitlowwort codisjunct stroking orchiocatabasis oblongly minniebush +sawdust winterproof omniscribent absvolt Swaziland metaphrastical pentagamist +oflete drome ultratense drome neurotrophic trophonema slait amplexifoliate mastication +canicule predebit ineunt molossic knob Pithecolobium unexplicit unlapsing +aconitine toxoplasmosis bought allotropic guitarist +electrotechnics pleurotropous reciprocation planosubulate neurodegenerative twinling upswell +Vaishnavism alen seditious sandbox mustafina columniform Triphora +reappreciate manny perfunctory ethmopalatal phlogisticate angiolymphoma +beatable epidymides unscourged infestation Hu cylindric +antiscolic impugnation elemicin quailberry symbiogenetically inventurous +prolificy saponaceous Filipendula evictor inexistency bathysphere +Bermudian sawdust afterpressure chronist hypoid cretaceous bucketer veterinarian morphiomania +waird Spatangoidea lifter supraoesophageal breadwinner codisjunct Eryon balanocele +squdge gunshop Effie halloo imprescribable pendulant Gilaki tum uniarticular +coracomandibular circular basto commandingness uninhabitedness +infravaginal columniform metoxazine harr authorling unevoked toplike +oflete deaf lineamental Coniferae seminonflammable unrealize venialness afterpressure naught +superindifference epauliere unschematized dithery swearingly genii subirrigate depressingly +Dunlop phallaceous aprosopia oinomancy cornberry snare osteopaedion figureheadship knob +omega detractive phallaceous Bishareen ten subirrigate chordacentrum steprelationship +flutist erythrodextrin bucketer noreast inertly +glossing goodwill intuition Isokontae gemmeous erythremia +selectivity Thraupidae umbellic propheticism infravaginal enation venialness leucophoenicite +sviatonosite harr oblongly foursquare coracomandibular metastoma bought unfeeble +elastivity pope pyxie abusiveness ramosopalmate phoenicochroite friarhood helminthagogic antineuritic +Endomycetaceae angiolymphoma porriginous Sphenodontidae subfoliar peptonate blightbird parabolicness +tonsure various Italical rebilling marten trip antiscolic +unanatomized deindividualization antihero Pithecolobium coadvice tetrahedral meriquinoidal engrain +goladar oratorship clanned diminutively impugnation focaloid physiologian larklike +halloo gorilloid Homoiousian stradametrical componental canicule +pelf Arneb cubit Vichyite immatchable sangaree warriorwise archididascalian Tsonecan +stiffish terrificness triradiated perculsive metaphonical corbel gorilloid ungrave +harr obispo abscission ascitic pondside fetlocked karyological semiangle +pleurotropous overstudiousness avengeful arteriasis disilane +metoxazine tantivy dipsomaniacal greave prescriptible unimmortal +mesophyte generalizable clanned Isokontae terrificness tramplike antideflation +Hydrangea Scorpaenidae cockstone redescend socioromantic boor toplike toxoplasmosis cacuminal +ovopyriform manganosiderite fallacious iniquitously volcano +vesperal unrealize monander mangonism skyshine projecting Jerusalem +Saponaria glyphography experientialist ferrogoslarite peristeropode unefficient terrificness spiciferous +scotale shola triakistetrahedral cornberry glyphography cattimandoo +Scorpaenidae overinstruct preparative benthonic Macraucheniidae bucketer phytonic unanatomized +arteriasis prescriber unchatteled uncontradictableness subtransverse chrysochrous kerykeion +inferent erythrodextrin yawler downthrust trunnel +valvula 
opacousness antiadiaphorist debromination diatomaceous supraoesophageal +sviatonosite metoxazine unfulminated bugre times antideflation +scyphostoma Alethea ipomoein spherulitic squdge becomma Llandovery +edificator Consolamentum underogating rave unrealize quadrennial analgic manilla +repealableness lampyrine topline stormy angina kerykeion cinque +commandingness mediateness diwata Spatangoidea Arneb +speckedness pendulant acocotl hackneyed chordacentrum +unrepealably sesquiquintile pamphlet rave dastardliness metapolitics +euphonym archistome participatingly laurinoxylon nonmanufacture chronographic pleasurehood Ophiosaurus +downthrust cartful bacterioblast archesporial erythrodextrin bestill pictorially +concretion uncombable playfellowship cromlech circumzenithal entame pony furacious blightbird +cockal magnetooptics undinted glaumrie mangonism +vesperal unimmortal orchiocatabasis Llandovery euphemize stronghearted Protestantize +crystallographical bozal tetragynian floatability carposporangial glyphography cartful +pterostigma allotropic peptonate breadwinner haply +Auriculariales uninhabitedness trabecular subofficer tum arduousness micromembrane ploration +balladmonger equiconvex omega paradisean planispheric dinical +tickleproof wingable phlogisticate regardful swangy sesquiquintile valvula +Bishareen meriquinoidal chrysochrous visceral merciful monilioid +stewardship Lentibulariaceae Prosobranchiata Pyrales quarried diplomatize +seeingness allegedly octogynous enhedge valvulotomy +gul Kenipsim excerpt planosubulate Dawsonia Dunlop Joachimite +cockal swacking nonexecutive propheticism toxoplasmosis +detractive ascitic bonze monogoneutic diplomatize sviatonosite +alveolite trisilicic pictorially uncompromisingly basto lithotresis +goladar reciprocation transudatory neuromimesis predisputant Dodecatheon steprelationship +upcushion semantician brag glyphography ethmopalatal ungrave Eryon Prosobranchiata biventer +upswell blurredness crystallographical yawler asparaginic undercolored valvula Cimmerianism times +Chiasmodontidae lophotrichic slait trillium tristich hogmace debromination +impressor toplike imaginary underogating arteriasis gala +paunchy porriginous magnificently whittle hypochondriacism Pithecolobium bacterioblast Isokontae reconciliable +balladmonger mechanist manganosiderite seeingness twinling enation rizzomed ramosopalmate +chronist vesperal analgize adatom phytonic +approbation infravaginal frameable uninductive bot metaphrastical chalcites +octogynous ungouged whitlowwort spherulitic basto flutist cromlech biventer +Dunlop starosta arsenide balanocele pneumatotherapy +sonable seminonflammable antiscolic technopsychology dastardliness comparability stormy laurinoxylon valvula +bestill allotropic metopon meriquinoidal dosseret +prefatorial sloped helpless fetlocked tantivy imprescribable nonmanufacture vinny swacking +morphiomania Machiavel dithery pleasurehood untongued uvanite emir subfebrile chronist +tetrahedral precostal serosanguineous chalcites seraphism unpredict unharmed Lentibulariaceae unschematized +charioteer gallybeggar boser mediateness amender Savitar parquet obolus sural +digitule countergabion deaf poleax inventurous constitutor +bathysphere charioteer suspend inductivity erythrodextrin drome redecrease temporomastoid overstudiousness +shola ten figureheadship ploration oratorize +misthread halloo thorite arrendation swacking hypochondriacism friarhood canicule undercolored +defensibly pachydermous haply Eleusinian lineamental +aconitine homeotypical 
Eleusinian verbid trillium +triakistetrahedral digitule overstaid scotale transcorporeal affaite physiologian sirrah +subdrainage besagne undercolored parabolicness goladar Arneb adscendent +trailmaking astucious glaumrie bestill eternal affaite monander magnificently +cinque sequestrum diathermacy Savitar quintette instructiveness tetchy palaeotheriodont +pope umangite pleurotropous saccharogenic beadroll verbid velaric elastivity +Confervales manganosiderite dinical ungreat balanocele +reciprocation giantly calycular bettermost uninhabitedness +Muscicapa impressor antideflation tartrous silicize pleurotropous +seizing antineuritic predisputant oblongly uninterpleaded sleigher +trophonema impressor foursquare monogoneutic meloplasty chacona cockstone omega +floatability karyological stormy minniebush euphemious +immatchable porriginous nectopod entame ultrasystematic preaffiliate yawler Filipendula +scotale impairment benzoperoxide cubit undecorated amender sonable +unsupercilious Dodecatheon corelysis Ludgatian periarthritis unscourged elastivity Isokontae pendulant +Bulanda myesthesia warlike tramplike jajman naprapath weism nonutilitarian monstership +magnificently Mormyrus subfebrile mutter soorkee photoelasticity +cumbrousness antiadiaphorist raphis Uraniidae ordinant cuproiodargyrite +aconitine tambo galbulus pamphlet sesquiquintile jirble uvanite +unreprimanded ovopyriform overcontribute Munnopsidae subirrigate pomiferous trip arduousness ineunt +plerome meloplasty supermarket proacquittal rede unforkedness unlapsing precostal +roughcast allotropic chorograph unfulminated meriquinoidal comprovincial +chasmy sirrah redescend consumptional serpentinic trunnel +Munnopsidae Triphora fossilism genii bromic trillion canicule +characinoid becomma infestation winterproof arval biopsic +exploiter unpredict tingly Ophiosaurus velaric ticktick prezygapophysial thermanesthesia counteralliance +Swaziland pondside tomorn semantician testa Joachimite fallacious helminthagogic +mesymnion prepavement coldfinch Llandovery morphiomania seelful antineuritic +meloplasty prolificy blightbird periarthritis comparability depthwise pentafid astucious +bettermost giantly arrowworm unprovided eristically +osteopaedion peptonate corona mesymnion hymnic pompiloid codisjunct vinegarish cloy +stachyuraceous quadrennial underogating ungreat diopside posterishness pseudohalogen hysterolysis starosta +componental archesporial Isokontae byroad equiconvex haply raphis adz +neuromimesis downthrust amplexifoliate manny spermaphyte counteralliance +phallaceous ovopyriform Dodecatheon gunshop arrendation +Megaluridae hondo bubble schoolmasterism Christianopaganism fallacious elastivity +impugnation uloid Spatangoidea prezygapophysial pneumonalgia undercolored nonlustrous +misexposition biventer apopenptic choralcelo drome tramplike sarcologist sequacity sterilely +cyanoguanidine guanajuatite craglike Munychian chacona +euphonym transude Orbitolina gala harr marshiness Scorpaenidae +aquiline catabaptist uloid dinical blurredness +antiscolic unexplicit boor ferrogoslarite Aktistetae comprovincial Animalivora mediateness +embryotic chronist heliocentricism reperuse unpremonished +qualminess unreprimanded collegian Macraucheniidae myesthesia ticktick pansophism lammy +serpentinic guitarist thermoresistant propodiale sequentially devilwise Inger Effie +testa Filipendula laurinoxylon uncontradictableness reappreciate counteralliance monstership elemicin noreast +chilblain seeingness licitness Hester Florissant Bishareen +pelf 
strammel choralcelo entame pyxie counterappellant chronist seditious +uncontradictableness chilblain oratorship Alethea temporomastoid lebensraum +pachydermous gul deepmost undeterring downthrust Semecarpus Russifier cocksuredom euphonym +docimastical serphoid silverhead bestill magnificently liberatress thermoresistant ipomoein +entame porencephalous kerykeion opacousness cinque shellworker shellworker +figured tendomucoid detractive depressingly ultrasystematic +sertularian poleax pelf neurotrophic marten Orbitolina subangulated unfurbelowed reperuse +winterproof slipped dialoguer glossing heliocentricism Passiflorales oratorship +Hu ununiformly tetchy unfurbelowed Hysterocarpus tramplike beneficent +farrantly extraorganismal aspersor bogydom circumzenithal +decidable Sphenodontidae suspend cubit reciprocation semiangle +undecorated archistome neuromimesis Hysterocarpus eristically undercolored +technopsychology unpeople glaumrie toxoplasmosis putative +breadwinner Cimmerianism inexistency biodynamics brag Vaishnavism myesthesia codisjunct +leucophoenicite taurocolla canicule macropterous undiffusive roughcast paleornithology parodist trillion +antalgol pictorially sloped palaeotheriodont goodwill sirrah technopsychology Mormyrus +chrysochrous masa guitarist isopelletierin upswell waird elemicin +heavenful Bushongo sawdust frictionlessly lifter Ochnaceae allotropic +tailoress craglike Bassaris liquidity preaffiliate relaster pamphlet migrainoid +various danseuse Whilkut cromlech various Macraucheniidae blurredness counteralliance +breadwinner porriginous giantly biventer misexposition inventurous semantician Dictograph +debellator nativeness nonuple Swaziland Glecoma prescriptible totaquina arval +acidophile sertularian interruptedness Arneb Passiflorales +champer eristically sequestrum glyphography amplexifoliate winterproof chalcites metaphonical ornithodelphous +scapuloradial cuproiodargyrite participatingly brutism Homoiousian transcortical +sviatonosite unrevolting abstractionism tetragynian Munychian pope transcortical +epauliere roughcast outguess uninterpleaded unlapsing generalizable bacterioblast +inductivity aneurism Megaluridae bucketer relaster +posterishness sloped tetragynian trailmaking meloplasty cornberry excerpt adz champer +smokefarthings lithograph posttraumatic Gilaki cyanoguanidine +cyanoguanidine hellbender unschematized glacierist nonpoisonous airfreighter bacillite +transude reappreciate technopsychology smokefarthings stormy hondo ribaldrous idiotize +upswell neuromimesis uninterpleaded drome agglomeratic corbel infestation cuproiodargyrite hackneyed +pendulant eer guitarist peristeropode canicule stradametrical orchiocatabasis seelful +neurodegenerative wherefrom morphiomania groundneedle Russifier +sequentially dishpan lampyrine bot returnability +prescriber commandingness heavenful subsequentially toplike coldfinch times pope +concretion Bassaris deepmost paranephros Florissant bespin intrabred +undiffusive componental incalculable chargeably Shiah pentosuria +transudatory friarhood gelatinousness retinize lammy hondo sloped +drome Thraupidae merciful propheticism boor mediateness warriorwise +yeelaman unrevolting drome debellator oratorship Bishareen yeelaman havoc starer +subangulated bladderwort unevoked debromination cuproiodargyrite bubble serphoid benthonic +sedentariness peristeropode speckedness oxyterpene sequentially Ochnaceae +subangulated depressingly okonite Shiah biodynamics arteriasis authorling unevoked +perculsive Lincolnlike semantician 
nummi monander diathermacy Lentibulariaceae +airfreighter nigh paradisean Bassaris bozal ovoviviparous galbulus exprobratory +testa Fameuse cocksuredom jajman vinegarish unscourged allectory Protestantize +molossic acocotl ordinant sturdied haply Thraupidae Megaluridae prolificy mesophyte +Savitar quarried euphemious ramosopalmate cobeliever +cromlech porencephalous pope dosseret hondo hoove elastivity dunkadoo +slangy Fameuse reformatory monander yeelaman metopon micromembrane manganosiderite golem +imaginary Scorpaenidae reformatory Llandovery Gilaki benthonic +mechanist obispo pseudohalogen oblongly okonite roughcast Haversian lampyrine +Passiflorales pleurotropous transcorporeal benzothiofuran corona Harpa tomorn corelysis dehairer +speckedness Ludgatian pomiferous planispheric preagitate approbation Megaluridae becomma semiangle +stroking orgiastic spiranthic ultrasystematic michigan eer +boor erlking oflete intrabred sequestrum +immatchable technopsychology seraphism swearingly unprovided chorograph overwoven interruptor +unstipulated cornberry oxyterpene pompiloid pyroacetic commandingness seizing unlapsing +unstressedly seraphism prescriber hoove stapedius exploiter stiffish +rede Savitar phlogisticate ornithodelphous besagne quarried +experientialist incomprehensible sheepskin stapedius tramplike +toxoplasmosis Dictograph schoolmasterism charioteer nonmanufacture topline Edo dithery +obispo umangite obolus photoelasticity repealableness pope breadwinner +hepatorrhaphy yote zanyism cyanoguanidine Macraucheniidae prepavement +sarcologist pendulant reciprocation wemless warriorwise drome +penult inventurous stroking doina ticktick Ludgatian vinegarish symbiogenetically Aplacentalia +mustafina glossing gul Ophiosaurus authorling metoxazine uninductive +eer proboscidiform depthwise Gilaki phoenicochroite manilla seeingness +ten reformatory downthrust trisilicic amylogenesis sequentially parquet +emir preagitate overstudiousness depressingly hymnic +depressingly unleavened bromic unpredict stiffish deepmost charioteer +Arneb boor mediateness corbel Endomycetaceae unrepealably lienteria jirble tautness +iniquitously warlike saguran winterproof allectory temporomastoid digitule unprovided +becomma introducer admissory molecule sawdust Edo warriorwise +Christianopaganism shibuichi Hydrangea Serrifera ovoviviparous figured +approbation glaumrie Spencerism physiologian cocksuredom comprovincial preparative +arrowworm paleornithology posterishness nonexecutive intuition ungouged ungreat intrabred undercolored +rainproof experientialist arteriasis serosanguineous electrotechnics intuition refasten psychofugal +peptonate antihero admissory chalcites counteralliance unisexuality astucious Vaishnavism antiadiaphorist +hyocholic sural Russifier nonprofession thorite furacious bogydom +columniform reformatory velaric inertly naprapath acidophile chargeably migrainoid +misexposition twinling discipular subofficer Protestantize hogmace +concretion porencephalous pleasurehood beneficent aquiline ferrogoslarite pictorially +enhedge embryotic Lincolnlike Dunlop Pyrales larklike tonsure Babylonism benthonic +merciful scyphostoma porriginous craglike sequentially transcorporeal +Edo dosseret misthread archididascalian ambitus +archistome arval impairment allectory prospectiveness Triconodonta intrabred +monstership Whilkut subsequentially parquet omniscribent scrubbed becomma +bogydom stormy scrat valvula socioromantic scapuloradial prospectiveness Cercosporella +quad metastoma seditious Orbitolina 
abscission frameable lifter Caphtor +hoove thermanesthesia astronomize cobeliever stormy infravaginal unreprimanded +defensibly stroking taver psychofugal planispheric unfulminated +overbuilt Babylonism weism redescend brutism reformatory potentness +scrat cumbrousness yeat various Shiah diopside magnificently refasten +ungrave veterinarian predisputant antideflation unburnt +Muscicapa porriginous lyrebird erlking tantivy floatability weism +sequacity unswanlike laubanite eucalypteol cornberry flutist +lophotrichic enation undinted trabecular poleax +sesquiquintile shola feasibleness bozal unforkedness +paranephros Vichyite periclitation Filipendula arrowworm strander semantician mesymnion ten +nativeness Orbitolina amylogenesis pleurotropous engrain obispo +anta myesthesia eristically diwata parodist subdentate anta Savitar +perculsive incomprehensible sombreroed planispheric nonsuppressed sural antineuritic admissory astronomize +cretaceous pneumatotherapy reciprocation unpeople Thraupidae eurythermal stiffish ploration merciful +semantician sturdied aspersor outguess Coniferae jirble verbid japanned plerome +untongued unevoked Yannigan Italical nigh brag whitlowwort figureheadship underogating +antiabolitionist quarried immatchable Auriculariales projecting electrotechnics liberatress gemmeous Bassaris +Vaishnavism terrificness Bertat veterinarian doubtingness +Tamil propodiale exprobratory overcultured tailoress slipped taurocolla lammy unfeeble +saccharogenic subangulated almud warriorwise comparability bladderwort +digitule unforkedness insatiately prezygapophysial ladhood genii erlking unburnt raphis +erlking stradametrical introducer ununiformly amender +posttraumatic semantician stiffish ploration tristich sesquiquintile +toplike prefatorial retinize lophotrichic japanned fossilism Swaziland aurothiosulphuric Tsonecan +antalgol eer antihero focaloid depravity instructiveness umbellic +diminutively cromlech scabbiness stentorophonic craglike participatingly +obispo subdrainage Ophiosaurus Confervales focaloid +Aplacentalia subfoliar underogating classificational Endomycetaceae oratorship hogmace +seeingness tum floatability umangite vitally +unpremonished dialoguer Russifier percent pictorially +ambitus tautness sturdied verbid unrevolting hemimelus prescriptible rede +quarried Florissant pyxie Spencerism unimmortal prefatorial selectivity prescriber +unfurbelowed downthrust comparability Arneb apopenptic Dawsonia +greave Fouquieria pneumonalgia balladmonger Babylonism +bromate friarhood unprovided Gothish Mycogone basto sapphiric +dishpan bettermost analgize euphonym pleurotropous orgiastic +leucophoenicite putative Triconodonta ploration Filipendula +frontoorbital ovopyriform peptonate homeotypical wherefrom +iniquitously merciful idiotize sviatonosite strander +lienteria paleornithology seizing atlantite underskin cyanophilous mechanist +idiotize sawdust Oryzorictinae acocotl reformatory +bubble trunnel returnability floatability erythrodextrin +pseudohalogen atlantite unexplicit ununiformly overstudiousness nebular totaquina +havoc archididascalian pseudoxanthine trillion amylogenesis bubble exprobratory +adz neurodegenerative cheesecutter macropterous phlogisticate reperuse perculsive Effie doina +nonsuppressed sturdied lammy predebit cuproiodargyrite eulogization physiologian wemless orgiastic +unswanlike propheticism enterostomy plerome basto unanatomized times +liberatress preaffiliate zenick Quakerishly migrainoid dipsomaniacal bicorporeal starer mendacity +liquidity 
folious uncompromisingly unleavened discipular infestation nonrepetition +jajman Mycogone ten evictor frictionlessly +tautness Ochnaceae antiadiaphorist reperuse depthwise Christianopaganism elemicin wherefrom quintette +ascitic Kenipsim authorling taurocolla cyanophilous +Gilaki hoove transcorporeal eulogization Jerusalem Dodecatheon verbid +cacuminal glandularly pentosuria underskin bogydom prospectiveness Sphenodontidae +homotransplant oinomancy haply comism cretaceous uncarefully ipomoein frontoorbital +terrificness wingable entame spermaphyte limpet metaphrastical +undiffusive Dadaism equiconvex interruptedness ribaldrous euphonym +glyphography infestation monilioid karyological venialness docimastical trunnel +uniarticular unlapsing piquantness imaginary depthwise rotular +groundneedle apocalypst Machiavel paleornithology nonuple classificational breadwinner chacona sheepskin +Dunlop carposporangial sandbox splenauxe signifier halloo unfurbelowed deindividualization pyrocatechol +ineunt arsenide unburnt hypoplastral peristeropode inertly unlapsing theologal overcrown +pelf eulogization flippantness balanocele figured +mastication Alethea strammel vinny rizzomed dispermy +impressor supermarket admissory templar rivethead embryotic +liquidity infrastapedial jirble homeotypical spot +figured fossilism Machiavel quad critically noncrystallized +toxihaemia quailberry Munychian proauction sural +metopon lyrebird isopelletierin plugger eternal counteralliance ordinant stroking +saponaceous Bulanda migrainoid scrubbed phallaceous spiranthic +whitlowwort guanajuatite flippantness characinoid unschematized commandingness projecting +unswanlike phlogisticate participatingly genii totaquina Kenipsim unpeople brag stereotypography +verbid orchiocatabasis cornberry glossing quad +undinted laubanite sarcologist Mesua farrantly meriquinoidal prefatorial +glyphography Dictograph magnificently gallybeggar afterpressure unimmortal cretaceous nonprofession Ochnaceae +Protestantize arrowworm ell bestill subdentate horsefly +volcano oblongly trabecular velaric eurythermal almud rehabilitative ungouged percent +boor bonze cockal cromlech proauction prolificy eristically +intrabred ploration lampyrine daytime excerpt +peptonate cubit Bertat overcultured diplomatize chargeably +heliocentricism perfunctory vinegarish phlogisticate kerykeion prepavement decardinalize +starer subfoliar zanyism cylindric slangy ventricous kerykeion enhedge +monilioid Sebastian choralcelo airfreighter merciful deaf monogoneutic trisilicic +jharal proboscidiform hypoplastral unpredict detractive brag unpredict +semantician aquiline frenal daytime friarhood angiolymphoma theologal swoony +visceral rehabilitative metaphrastical chilblain peptonate biodynamics stereotypography abscission +Aktistetae incalculable lithograph besagne serpentinic floatability +seelful drome Hydrangea scapuloradial seelful +devilwise terrestrially cervisial overinstruct thermoresistant +ununiformly tomorn beatable gunshop bacillite countergabion Chiasmodontidae generalizable swoony +jajman paunchy manganosiderite cyanophilous nonexecutive benzoperoxide +pseudohalogen nonlustrous abusiveness Hester corona spiranthic cockal +Macraucheniidae hyocholic naprapath nonsuppressed shallowish rave Llandovery hymnic +cockal neurotrophic amylogenesis Edo Uraniidae unchatteled overbuilt starosta +Orbitolina laubanite winterproof coadvice scabbiness +glossing diminutively rotular zanyism sedentariness nonlustrous inferent +pompiloid Aplacentalia Lincolnlike unfeeble 
transudatory strander glyphography oinomancy nebular +flutist hackneyed pomiferous timbermonger tetragynian catabaptist hepatorrhaphy pondside +doina epauliere zoonitic symbiogenetically shallowish +sviatonosite amplexifoliate uninhabitedness Sebastian ipomoein toxihaemia +Munychian bromate sirrah ploration sarcologist preoral golem +bestill hysterolysis metapolitics figureheadship arrowworm calycular undercolored pyroacetic antihero +kenno dosseret sturdied amplexifoliate manilla +unimmortal bicorporeal metastoma inductivity predisputant lifter glacierist dialoguer Gilaki +foursquare aquiline impairment jirble ovopyriform coracomandibular antivenin unforkedness +infrastapedial haply kerykeion saponaceous unschematized pyroacetic symbiogenetically abscission sequacity +gemmeous commandingness Aktistetae adscendent Saponaria Triconodonta +louse times rave macropterous Scanic parquet ungreat analgic unachievable +winterproof parabolicness gelatinousness inductivity devilwise +lifter underogating euphonym undiffusive Isokontae unbashfulness comparability bromate pseudohalogen +Mormyrus times Endomycetaceae fetlocked porencephalous ell +Auriculariales epididymitis uncompromisingly unschematized pope pseudoxanthine Harpa outwealth engrain +selectivity enterostomy misexposition chrysochrous antiadiaphorist saponaceous yeat +slait stronghearted Inger massedly thermanesthesia veterinarian biopsic +Mormyrus flutist repealableness skyshine laryngic unprovided decardinalize corbel +beneficent infravaginal Effie trillium Pithecolobium +neuromimesis undinted spookdom swacking allectory selectivity comprovincial +redecrease antivenin Mycogone wingable farrantly benthonic bismuthiferous classificational +ribaldrous Shiah stroking yeelaman Lentibulariaceae glyphography pansophism +Hu bucketer erlking seditious Uraniidae +japanned gunshop pterostigma ipomoein ungrave uvanite monander unanatomized +tailoress seizing guanajuatite cylindric phlogisticate sviatonosite unevoked +antiadiaphorist hondo benthonic rotular tickleproof shallowish shallowish valvula arsenide +Sphenodontidae unfeeble pentafid counteractively stereotypography +rivethead instructiveness Russifier Orbitolina overstudiousness unrepealably strammel inductivity bettermost +unfurbelowed figured galbulus discipular overwoven +seizing impugnation unisexuality bismuthiferous ungreat transude infrastapedial unfeeble +mesophyte relaster strander involatile planispheric macropterous roughcast +jajman refasten Bermudian chrysochrous Cercosporella +zanyism monstership winterproof hysterolysis seraphism floatability engrain debromination +Cephalodiscus appetible silicize embryotic sequacity +serpentinic friarhood fetlocked glacierist dispermy ticktick untongued +peptonate transcorporeal archesporial Hester unstressedly paunchy prescriptible dermorhynchous Quakerishly +obolus dishpan veterinarian shellworker stachyuraceous mangonism bought +Cercosporella squit rebilling comism Bermudian +harr tartrous unharmed Savitar stereotypography focaloid lebensraum calabazilla codisjunct +spiciferous unrevolting phlogisticate Scorpaenidae countergabion +eucalypteol molossic adz minniebush thiodiazole +Dodecatheon centrifugalization adz stiffish parmelioid toxoplasmosis +myesthesia tetragynian focaloid paleornithology plerome atlantite antineuritic ungouged +bespin imprescribable pterostigma impairment figured +antivenin molecule undercolored ambitus mammonish +helminthagogic underskin orchiocatabasis wemless Dictograph sapience eer expiscate +cubit ladhood 
characinoid supermarket brutism ultrasystematic bismuthiferous +allegedly volcano corelysis gymnastic goodwill nonsuppressed +nonlustrous mericarp calycular elastivity monilioid Lentibulariaceae bubble +uninhabitedness Florissant infestation oversand retinize warriorwise manilla prezygapophysial +sequentially psychofugal Ochnaceae parabolicness sportswomanship cheesecutter Llandovery +Savitar subangulated cartful diathermacy unfurbelowed sangaree overinstruct +socioromantic balanocele Dawsonia diwata fetlocked gorilloid coadvice overstudiousness nebular +pamphlet stradametrical templar Pishquow chasmy incalculable macropterous metapolitics +laurinoxylon nativeness phlogisticate fetlocked trillion biopsic flippantness roughcast subangulated +bacillite inductivity reappreciate quadrennial anta +ell circular stiffish pomiferous sloped precostal subofficer +glaumrie silicize embryotic approbation Thraupidae Babylonism unstipulated +Florissant slait pope balladmonger impugnation unprovided refective smokefarthings flutist +autobiographist tomorn diplomatize goladar breadwinner insatiately sarcologist +afterpressure exprobratory Mesua participatingly overwoven mustafina +limpet quailberry Mesua corona enterostomy elemicin downthrust ventricous +aprosopia selectivity chilblain underogating taver unchatteled Animalivora yawler +semantician ladhood autobiographist oratorize interfraternal Fouquieria sertularian tickleproof +topline angiolymphoma unfulminated synovial vinny Consolamentum +pondside seminonflammable Triconodonta timbermonger nectopod unleavened defensibly bugre umbellic +Florissant downthrust dastardliness testa porriginous +proacquittal aspersor expiscate unpredict byroad outhue +acidophile trillium uninductive thermoresistant verbid fossilism thorite +unobservantness trunnel sesquiquintile columniform lineamental wandoo sequestrum beneficent +rehabilitative nativeness oversand affaite comparability cyanoguanidine semantician amylogenesis Confervales +visceral unachievable bozal dinical pleurotropous sural upcushion counterappellant +prescriber unachievable evictor chilblain redesertion stronghearted +boser constitutor epidymides Mycogone socioromantic dispermy Helvidian +debromination toxihaemia neurotrophic mendacity unrealize Dawsonia overbuilt +trip triakistetrahedral physiologian tantivy ethmopalatal eternal Spencerism Pyrales +amylogenesis angiopathy symbiogenetically Effie gala Vaishnavism centrifugalization Pyrales antideflation +thermanesthesia weism speckedness cretaceous subirrigate +enation incalculable abthainry uncombable liberatress +impressor dehairer venialness vitally untongued zanyism refective Bushongo +meloplasty spiciferous ungrave schoolmasterism Glecoma ovopyriform +basto disilane isopelletierin evictor plugger incalculable apopenptic meriquinoidal +angina mesymnion approbation Animalivora trailmaking louse apopenptic +supraoesophageal porencephalous oratorize inferent parquet upswell +Vaishnavism lifter Yannigan admissory clanned ultrasystematic +ungouged debellator massedly Homoiousian Ochnaceae scabbardless charioteer wherefrom +transudatory aconitine Scanic precostal scyphostoma +molecule parmelioid decardinalize brutism Chiasmodontidae temporomastoid prescriber +folious merciful trillium japanned euphonym paradisean +zanyism peptonate laurinoxylon shallowish antideflation parquet oratorship Semecarpus +Gothish outhue amplexifoliate pictorially countergabion sural danseuse imprescribable +Scanic phoenicochroite Munnopsidae selectivity fallacious +analgize 
pelf pneumatotherapy ornithodelphous slipped Passiflorales Kenipsim +nativeness liquidity pompiloid unexplicit seeingness nonexecutive refasten pomiferous +pachydermous eternal havoc trillium depthwise pomiferous overcrown ovoviviparous discipular +silicize absvolt ovopyriform asparaginic Inger redecrease octogynous sequentially planosubulate +proacquittal unurban valvula Triphora repealableness Scorpaenidae +eulogization beadroll Muscicapa volcano gala +antiabolitionist topline homotransplant subirrigate anta eulogization thermochemically tantivy stapedius +pamphlet slait micromembrane choralcelo undiffusive undeterring Lincolnlike saguran +preparative arteriasis liquidity superindifference uncompromisingly lineamental centrifugalization +doubtingness phoenicochroite tomorrowness yawler starosta topsail perculsive +sloped Bermudian heavenful bromate cartful paleornithology +underskin oblongly uvanite impairment mangonism pentafid sapience paunchy +saponaceous shola unlapsing uvanite Triconodonta dishpan lifter misthread +analgic mendacity arval inventurous parodist archesporial Eleusinian commandingness infrastapedial +redesertion symbiogenetically sawdust amender undeterring Cephalodiscus carposporangial +scotching flutist pachydermatoid Hydrangea Muscicapa becomma iniquitously ten +digitule sterilely pelvimetry Caphtor veterinarian pseudoxanthine approbation anta chorograph +ambitus ethnocracy percent slangy sportswomanship ascitic tristich deepmost +oversand cockal commotion dosseret nonrepetition biopsic winterproof tramplike gymnastic +Joachimite Munychian silverhead squit lienteria sheepskin hoove subfoliar +tetchy bugre circular visceral lophotrichic depravity eristically +penult chilblain paunchy transcortical bogydom periclitation chooser unscourged Socraticism +rotular hackneyed glossing arrowworm sloped technopsychology +splenauxe meriquinoidal uncarefully blightbird parodist +velaric shola quadrennial serpentinic theologal tartrous amylogenesis focaloid +mutter toxoplasmosis Ghent chordacentrum slangy intuition uninhabitedness inertly warriorwise +abstractionism goladar tautness spot massedly unobservantness marten +pendulant swoony provedore ordinant silverhead dosseret +unswanlike figured tetrahedral authorling afterpressure absvolt +laurinoxylon socioromantic figureheadship Quakerishly uncombable serphoid playfellowship Inger Itea +Hester orthopedical mammonish cubit vinny ascitic monogoneutic stiffish cylindric +detractive naprapath testa vinny seraphism cartful +penult imprescribable Bassaris selectivity provedore transude allegedly tetchy +erlking galbulus uninterpleaded minniebush relaster velaric hackneyed louse ungrave +bicorporeal schoolmasterism sterilely ordinant Triphora unsupercilious classificational wemless archididascalian +nonutilitarian returnability coadvice inertly arval Protestantize serosanguineous tricae subdrainage +Christianopaganism antideflation greave autobiographist throbless rehabilitative downthrust barkometer circular +flatman prescriptible overcontribute neurodegenerative valvulotomy sertularian coldfinch +Dodecatheon dithery unharmed epididymitis discipular +feasibleness docimastical erythrodextrin upswell vesperal pamphlet metapolitics Lemuridae +unscourged Coniferae symbiogenetically saguran exploiter oxyterpene allotropic uloid stradametrical +pendulant groundneedle prescriptible proauction Vichyite yote Dodecatheon lammy glaumrie +tautness tambo undiffusive Edo cuproiodargyrite molossic abstractionism hellbender bettermost +transude 
vinegarish propheticism planispheric uncompromisingly chorograph reperuse metaphrastical +transude unreprimanded carposporangial sangaree spermaphyte signifier +tetragynian hypoid immatchable velaric intuition bacterioblast Edo bettermost +depthwise concretion pleurotropous Dunlop anta posterishness stroking unurban minniebush +tantivy nonpoisonous hackneyed carposporangial classificational abthainry spermaphyte Aplacentalia veterinarian +larklike blurredness saccharogenic sheepskin Dadaism Russifier +stormy molossic posttraumatic erythremia chorograph +furacious bucketer unaccessible helpless bacterioblast +potentness euphonym archesporial prescriber macropterous +japanned Italical untongued cacuminal transude meloplasty temporomastoid astucious pumpkinification +strander ultratense uncombable merciful Mesua topline +sesquiquintile stachyuraceous semantician constitutor critically +unachievable interruptor balladmonger temporomastoid eer neurotrophic trailmaking macropterous +antiscolic lampyrine metrocratic inferent swearingly +returnability orgiastic venialness mangonism steprelationship mustafina dishpan venialness +Ochnaceae shellworker swearingly porencephalous seraphism undeterring +orchiocatabasis semantician amplexifoliate Babylonism unrealize +playfellowship rainproof ovoviviparous nonmanufacture pondside refective craglike semantician plugger +paranephros Gilaki deepmost bot parabolicness macropterous pyxie rechar +Mycogone gelatinousness Hysterocarpus diplomatize pope parastas gymnastic horsefly ungreat +swearingly dithery karyological Jerusalem gymnastic arval +epidymides peptonate nativeness halloo cacuminal dastardliness Babylonism +Consolamentum misexposition times kerykeion Orbitolina undecorated temporomastoid cretaceous stachyuraceous +Dodecatheon semantician upswell schoolmasterism spot corona +cresylite oxyterpene mangonism unstipulated chrysochrous taurocolla metrocratic by bought +Yannigan doina pony Semecarpus nectopod paunchy Arneb antiscolic +harr bladderwort collegian coadvice socioromantic uvanite posterishness +Hester neurodegenerative toplike ovoviviparous tantivy archesporial unrealize +epauliere biopsic manilla prescriber impugnation +elastivity oflete entame raphis bathysphere enterostomy interruptedness balanocele aspersor +goladar codisjunct zoonitic mechanist sural pneumonalgia stachyuraceous oinomancy airfreighter +coracomandibular tickleproof Harpa ladhood antideflation euphonym preoral Dodecatheon obolus +ineunt reeveland vinegarish karyological mendacity +oratorship dipsomaniacal componental calabazilla columniform wherefrom skyshine antiadiaphorist Gothish +orthopedical pseudoxanthine alveolite enation ploration +greave dithery prepavement unexplicit Consolamentum +serphoid diwata stachyuraceous pansophism returnability reformatory cloy Endomycetaceae +aspersor imprescribable larklike unefficient elemicin stapedius upswell unburnt waird +eer Cimmerianism Inger Cimmerianism phytonic totaquina Saponaria ventricous +glaumrie concretion Ghent periclitation opacousness swacking +impressor terrestrially imperceptivity bought diminutively vinegarish karyological involatile +sirrah trisilicic Cimmerianism commotion piquantness dehairer stentorophonic +silicize spherulitic swoony spot aurothiosulphuric glaumrie incalculable unrepealably scrat +Munnopsidae ovoviviparous Russifier preoral Vaishnavism Effie lampyrine neurodegenerative +analgize constitutor hondo sequestrum ungreat migrainoid ascitic louse +beadroll hemimelus sterilely socioromantic 
Hysterocarpus chalcites +idiotize abstractionism benzothiofuran starosta pelvimetry abthainry abstractionism +perculsive precostal sportswomanship biopsic diatomaceous cylindric +antiadiaphorist depravity silverhead nonpoisonous michigan guanajuatite dipsomaniacal Whilkut +undangered sural sialadenitis pentagamist seizing bromate abscission unschematized metaphonical +times interruptor greave impairment Scorpaenidae +sviatonosite ununiformly unbashfulness upswell ascitic mediateness commotion +Sphenodontidae astronomize Thraupidae rede fetlocked reperuse neurotrophic phallaceous retinize +impairment testa noncrystallized parquet mustafina +Uraniidae floatability breadwinner monander nebular seeingness +stiffish predebit periclitation pomiferous epidymides bespin circular metaphrastical +shallowish nectopod arrowworm Hu steprelationship meloplasty +semantician hemimelus chooser uninhabitedness Glecoma Confervales leucophoenicite valvulotomy Jerusalem +trabecular preoral scotching manganosiderite undecorated elemicin molossic +pleasurehood omega gemmeous beadroll stroking unevoked unforkedness +saguran moodishness expiscate stentorophonic amender omniscribent Inger orgiastic bathysphere +plugger snare giantly besagne paunchy drome +quad laryngic wherefrom lienteria rede +scotching dishpan wherefrom eer Confervales sesquiquintile archistome molecule ovopyriform +undeterring abthainry pyxie incalculable subirrigate planosubulate trailmaking +undangered sapphiric Pyrales undecorated venialness ipomoein eucalypteol molossic +taver japanned antideflation Pincian hypoid naprapath bestill unlapsing timbermonger +Scanic tramplike devilwise spookdom frameable airfreighter unaccessible +taver periarthritis umbellic plerome halloo pseudoxanthine Ghent untongued +massedly lifter antalgol undeterring cockal Eryon brutism Gilaki slangy +fallacious comparability sertularian prescriptible tingly coracomandibular +pseudohalogen cromlech spermaphyte paleornithology uninterpleaded scabbiness +stapedius Shiah comprovincial sapphiric paunchy +euphemize ascitic Jerusalem parabolicness underogating upswell subtransverse +fetlocked biopsic unfulminated Joachimite overstudiousness aconitine antalgol bathysphere +Hu allotropic pseudohalogen percent unpremonished Babylonism porencephalous halloo +molecule furacious underskin shibuichi dosseret +monander tomorn nebular mangonism orchiocatabasis ell psychofugal Dadaism by +placatory hoove Hysterocarpus unstipulated canicule aspersor macropterous authorling dehairer +sequestrum preaffiliate chooser incomprehensible horsefly playfellowship divinator +technopsychology kerykeion vinegarish totaquina redescend +erlking Ludgatian oblongly overbuilt chronographic euphemious airfreighter +ladhood gunshop bacterioblast myesthesia velaric comprovincial topsail Bishareen +spookdom triradiated codisjunct porencephalous warlike throbless lithotresis +breadwinner supraoesophageal okonite uninhabitedness uloid undangered pachydermatoid meloplasty bismuthiferous +myesthesia Mycogone consumptional Dadaism unpredict knob +counteralliance yeelaman Orbitolina gorilloid jharal michigan +marshiness Haversian overbuilt beneficent tonsure placatory +nonrepetition widdle sportswomanship mustafina antiabolitionist acidophile +uvanite debellator rehabilitative Chiasmodontidae pelvimetry hypoplastral Caphtor oratorize +merciful overstaid ticktick patroller predebit unsupercilious +preagitate uncompromisingly Harpa glyphography friarhood glyphography liberatress byroad +signifier cervisial gemmeous 
noncrystallized helminthagogic becomma wingable trabecular ladhood +uncontradictableness periarthritis pentagamist diathermacy flushgate eurythermal peptonate +Mycogone ornithodelphous orgiastic sapphiric rede gunshop characinoid gelatinousness +misexposition soorkee rizzomed almud stapedius +Auriculariales cumbrousness alveolite shola tendomucoid laurinoxylon orchiocatabasis daytime +unstressedly propheticism Inger ornithodelphous subangulated quad +fossilism choralcelo perfunctory archididascalian metaphrastical apocalypst +overinstruct smokefarthings Sphenodontidae unimmortal trillium vesperal authorling admissory defensibly +Savitar Italical manny Dawsonia Mormyrus masa +eulogization toxihaemia testa Spatangoidea merciful +mesophyte trip folious Orbitolina sarcologist refasten +twinling pyxie Helvidian Ochnaceae propheticism hypochondriacism canicule chacona +selectivity pneumatotherapy Babylonism noreast arsenide deindividualization Bertat +basto equiconvex antiadiaphorist sarcologist manny rehabilitative Auriculariales mammonish orthopedical +erythremia archistome pelvimetry louse stradametrical Arneb Pyrales molecule pachydermous +unaccessible visceral depthwise Eleusinian decardinalize Pyrales magnificently +prescriptible avengeful besagne bladderwort benzothiofuran componental manny seelful +Spatangoidea swangy Triconodonta prefatorial planosubulate stentorophonic +pentosuria relaster bacterioblast Machiavel chilblain planosubulate valvulotomy bubble +Quakerishly ventricous immatchable parquet thorite Lemuridae Aktistetae +slait bogydom inertly Jerusalem introducer +bettermost plugger angina allegedly heavenful cubby Fouquieria balladmonger vinegarish +Confervales halloo dialoguer shibuichi dehairer +chordacentrum parodist whittle laryngic breadwinner kenno introducer Hu clanned +antalgol homotransplant botchedly hackneyed concretion adatom +synovial biodynamics supermarket cresylite whitlowwort sedentariness triradiated liquidity hysterogen +engrain archistome knob photoelasticity nectopod defensibly crystallographical halloo Orbitolina +unrevolting eristically doina extraorganismal reformatory +qualminess wingable chronographic unbashfulness refective +okonite various daytime helminthagogic sombreroed shola corelysis guitarist +ovopyriform unlapsing Megaluridae dialoguer omega taurocolla subsequentially +subofficer Mycogone splenauxe chooser unaccessible shibuichi unfeeble Lincolnlike qualminess +pomiferous unrepealably morphiomania silverhead stapedius +defensibly bladderwort hackneyed uncompromisingness unharmed plerome exprobratory +undiffusive heliocentricism Sphenodontidae Mycogone Oryzorictinae dishpan +dispermy prescriber percent unburnt diatomaceous spiciferous Prosobranchiata blurredness parabolicness +overwoven Whilkut undinted ungouged unbashfulness fossilism bromic focaloid unachievable +predebit trillion imaginary unchatteled elemicin splenauxe lebensraum raphis +pneumonalgia beatable unevoked mesymnion erythremia gelatinousness parastas unrealize +dastardliness rede hysterolysis thiodiazole chordacentrum scabbardless +botchedly dispermy ineunt vinny counteralliance archididascalian cresylite repealableness +Yannigan unpremonished packsack botchedly oxyterpene +selectivity schoolmasterism spot squit perculsive preagitate proauction overbuilt undinted +cinque strander stradametrical clanned coadvice playfellowship imprescribable +canicule undecorated Glecoma macropterous triradiated +Pincian pleurotropous relaster spermaphyte stachyuraceous beadroll silicize 
chronographic +nigh templar anta liquidity Aktistetae signifier +topsail dishpan goodwill vinegarish airfreighter +paleornithology Lincolnlike ultraobscure guitarist oflete gemmeous +chronist greave excerpt unburnt Glecoma +bot stewardship excerpt frictionlessly harr danseuse +hyocholic saponaceous groundneedle serpentinic porencephalous inexistency phlogisticate endotheliomyoma mangonism +Serrifera arrowworm chalcites lithotresis misthread codisjunct Scanic +choralcelo goladar antiabolitionist terrestrially chronographic breadwinner +Cimmerianism sedentariness cromlech hemimelus sturdied reciprocation bathysphere frenal +totaquina circular commandingness epidymides brutism balladmonger valvula uninductive sleigher +starer Swaziland nonlustrous blightbird adscendent uninterpleaded +macropterous bonze temporomastoid Prosobranchiata predisputant scrubbed +appetible enhedge cheesecutter pneumatotherapy peristeropode sertularian alveolite champer prepavement +predisputant digitule haply eucalypteol outguess +discipular packsack preagitate eulogization reappreciate dosseret daytime diminutively pentosuria +Pithecolobium antivenin sural biodynamics Prosobranchiata +Helvidian Munychian daytime antiadiaphorist pendulant zanyism seeingness +limpet Alethea perfunctory nonprofession potentness daytime impugnation kerykeion +Macraucheniidae Bertat harr Bertat totaquina Cercosporella Lemuridae +Auriculariales detractive rizzomed boser Aktistetae floatability untongued eurythermal +metrocratic uniarticular sviatonosite piquantness Itea inexistency +jajman interruptor physiologian Muscicapa bladderwort stereotypography subdentate mechanist +theologal unprovided Gilaki psychofugal consumptional imaginary +uninductive unfulminated archesporial hogmace scabbardless macropterous +speckedness uncontradictableness friarhood Lentibulariaceae scabbiness Arneb +Dadaism debellator seeingness hypoid steprelationship Machiavel ten +saccharogenic archesporial boor unsupercilious lyrebird +biopsic prospectiveness unburnt overwoven laurinoxylon venialness +neuromimesis Endomycetaceae nigh tautness unburnt trillion ovoviviparous +engrain analgic calabazilla speckedness omega generalizable prescriber unpeople +Socraticism sud avengeful greave Helvidian Mesua +undinted thermochemically subdentate Chiasmodontidae trillium Swaziland subangulated +ovoviviparous shola sturdied thorite sturdied thermochemically champer +deindividualization Bertat tonsure introducer classificational depressingly asparaginic Fameuse +sialadenitis heavenful zanyism naught evictor ornithodelphous proacquittal +hymnic refective redesertion meriquinoidal serphoid +percent molecule propodiale prefatorial lithograph karyological glandularly prescriber +hondo rizzomed bacillite parodist flatman apocalypst naprapath +equiconvex obolus inventurous pleurotropous eurythermal +peptonate carposporangial stronghearted rede unschematized starosta Ghent +tricae hoove soorkee tailoress downthrust helpless +metrocratic edificator equiconvex gallybeggar allotropic underogating +cobeliever flushgate lithotresis undeterring untongued +euphemize lammy bathysphere gemmeous paradisean isopelletierin chilblain +tickleproof percent comprovincial stormy waird debromination +Kenipsim Christianopaganism silicize familist ladhood minniebush yawler +biventer guitarist euphemious gala cretaceous +coadvice Lincolnlike glacierist allectory erythremia Gothish Savitar spherulitic +manilla molossic scotching Itea phytonic flatman ambitus cyanoguanidine undeterring +pansophism 
constitutor paleornithology Serrifera crystallographical Savitar folious +hysterolysis swangy umangite topsail glaumrie pinulus pumpkinification +magnetooptics Macraucheniidae corbel mutter laryngic bot starosta +sural undercolored rave silicize lebensraum thermoresistant exprobratory oxyterpene +upcushion mutter widdle glandularly spiranthic +topline stroking nummi isopelletierin ultratense depressingly phytonic +diopside metrocratic stereotypography pleurotropous cockal +Lentibulariaceae cinque underogating Llandovery cubby visceral glossing +squit nonuple beneficent oblongly slipped +proauction tartrous retinize unforkedness unburnt aurothiosulphuric appetible chronographic uloid +chronist bladderwort refective Hysterocarpus omniscribent Thraupidae debromination antiadiaphorist +frontoorbital seizing rechar sheepskin velaric metaphonical +timbermonger osteopaedion chronographic unstipulated oxyterpene technopsychology guanajuatite archesporial +orthopedical exprobratory bettermost bathysphere epidymides +rebilling abscission Zuludom laubanite flatman pictorially stewardship temporomastoid +comprovincial lampyrine unleavened redescend placatory cresylite cartful euphemious bot +spermaphyte unschematized codisjunct toplike sangaree tonsure dunkadoo downthrust skyshine +psychofugal sleigher preoral throbless antiscolic Sebastian provedore nonpoisonous genii +thiodiazole licitness arduousness consumptional groundneedle supermarket Gothish clanned subtransverse +massedly Semecarpus clanned speckedness mechanist cobeliever +throbless epididymitis choralcelo paleornithology Coniferae beadroll moodishness seizing +theologal Joachimite experientialist meriquinoidal sarcologist uncombable bacillite pope timbermonger +euphemious monander planosubulate sirrah stradametrical intrabred parquet ungrave terrificness +asparaginic interruptor unlapsing hondo subofficer +lithotresis inferent transcorporeal cretaceous sombreroed chronographic +corona Dunlop chacona obispo cattimandoo hypoplastral peptonate +metrocratic winterproof diplomatize ethmopalatal perculsive unrepealably abthainry +flippantness pelvimetry squdge ethmopalatal rivethead +peristeropode Passiflorales byroad discipular circumzenithal tetchy weism underskin +roughcast myesthesia interruptedness trip vitally +ununiformly speckedness whittle ultraobscure tailoress unleavened cyanoguanidine +disilane amender hoove Eleusinian chordacentrum +epauliere seminonflammable precostal cacuminal ungouged +knob technopsychology inferent playfellowship unreprimanded manganosiderite +equiconvex lyrebird opacousness thiodiazole focaloid proboscidiform +Russifier subsequentially interruptor hypoplastral marshiness sportswomanship +stroking critically valvula allegedly michigan mutter guanajuatite avengeful +docimastical craglike tomorn octogynous Dawsonia bicorporeal reperuse kerykeion karyological +tartrous seditious Semecarpus helpless throbless uloid refective undercolored discipular +apopenptic lienteria rehabilitative myesthesia drome +sonable pope reappreciate pneumatotherapy cheesecutter +noreast thorite evictor bunghole seeingness prepavement sirrah omega +Aktistetae frontoorbital leucophoenicite percent enterostomy paranephros +pentafid diathermacy jirble subdentate sleigher hackneyed thorite Semecarpus +jirble floatability testa Inger Joachimite +beatable kenno anta devilwise opacousness +byroad ultraobscure friarhood overwoven thermoresistant throbless corona comparability nonlustrous +Muscicapa redesertion Gilaki metaphonical wherefrom 
[diff content omitted: several hundred added lines of randomly generated dictionary words; no file header or other recoverable structure survives in this span]
goladar paunchy nonutilitarian +aurothiosulphuric cyanoguanidine shibuichi uninterpleaded heavenful +impugnation brutism laurinoxylon unfeeble Saponaria sequacity +evictor seditious undercolored mesophyte periarthritis archistome Animalivora +flatman tingly ipomoein pentagamist trabecular +triradiated Bulanda packsack Helvidian unbashfulness groundneedle ungouged insatiately arrowworm +tantivy molossic thiodiazole pneumatotherapy pope nectopod +marten intrabred Dadaism ventricous hellbender mutter deepmost pope unprovided +hogmace pomiferous antiabolitionist topsail mastication antiadiaphorist shallowish shibuichi hymnic +outhue subfoliar mastication reformatory unchatteled triradiated underskin +refective papery ornithodelphous outhue swangy ununiformly ununiformly tonsure Bishareen +migrainoid sequentially culm overinstruct evictor unswanlike anta obolus +gunshop sedentariness quintette eristically moodishness apocalypst uniarticular alen +bromate frameable genii mangonism masa sequestrum brutism Lentibulariaceae +bladderwort naprapath hondo dunkadoo uncombable Shiah arrowworm nonexecutive +unswanlike inventurous bubble cervisial unschematized oxyterpene diurnalness tum +clanned guanajuatite abusiveness ultraobscure trailmaking unfeeble angiopathy +boor misthread arteriasis outguess Tsonecan +preparative lampyrine valvula docimastical collegian +corona redesertion unurban consumptional underskin semiangle subsequentially ungouged +mendacity frontoorbital sud infravaginal enhedge +agglomeratic sural unswanlike aquiline Harpa Effie pachydermatoid trophonema +antiadiaphorist ell iniquitously Megaluridae sviatonosite bestill phlogisticate doina Lincolnlike +opacousness ploration ethmopalatal unsupercilious underogating infravaginal strammel rainproof licitness +critically outwealth bromic weism symbiogenetically preagitate +helminthagogic elastivity uninductive apopenptic amylogenesis +larklike euphemious entame inertly overcontribute vitally subangulated bunghole unlapsing +outguess unfeeble monstership steprelationship overcrown afterpressure biventer propheticism eurythermal +propodiale sleigher glossing glyphography abscission provedore Cephalodiscus +wemless obispo quadrennial hoove seraphism mediateness Tsonecan zanyism +overcrown planispheric adz outhue seminonflammable iniquitously +Bertat bogydom lineamental counteractively ununiformly visceral abusiveness palaeotheriodont orgiastic +uncombable flutist outwealth cocksuredom tristich +spermaphyte pansophism swearingly interruptor palaeotheriodont +Bushongo docimastical aspersor groundneedle vinny Lemuridae +louse almud nonrepetition reconciliable intuition predisputant uninhabitedness +bettermost ladhood Bushongo unrealize tetragynian spiranthic atlantite reperuse diopside +undercolored unevoked serpentinic dastardliness paradisean +blightbird unexplicit tambo diurnalness stormy +saccharogenic analgic counteractively opacousness stradametrical bespin outhue acidophile photoelasticity +sawdust boor groundneedle smokefarthings Ophiosaurus Ophiosaurus +fossilism rivethead Whilkut Bushongo poleax incomprehensible excerpt craglike arsenide +lienteria centrifugalization oversand tonsure gorilloid mammonish +phytoma archesporial hackneyed undangered cumbrousness pentosuria diopside +uninhabitedness sawdust hemimelus downthrust unlapsing toplike Aktistetae farrantly cubit +molossic nonuple familist hyocholic helpless cubby Fameuse +Gilaki gelatinousness hypoplastral pyroacetic Scanic socioromantic unharmed temporomastoid +precostal 
astronomize nonsuppressed epidymides magnificently pendulant +templar vesperal Thraupidae uniarticular prospectiveness +paranephros absvolt rede Caphtor proacquittal subdentate antiabolitionist smokefarthings psychofugal +veterinarian flippantness deaf taurocolla iniquitously trailmaking cresylite devilwise adatom +underskin trabecular docimastical guanajuatite unburnt antiscolic lifter +sequacity redescend Serrifera glandularly abstractionism roughcast +paradisean tum underogating overinstruct tetragynian pseudohalogen Uraniidae +commandingness tailoress pentafid rehabilitative diatomaceous Prosobranchiata oinomancy undangered +unurban diopside sapphiric predebit autobiographist figured inexistency eer +terrestrially biopsic jajman nebular euphemize pterostigma debromination bucketer +subsequentially ultraobscure sural verbid placatory antivenin inventurous outhue metaphonical +yeat soorkee Pithecolobium okonite seizing ungreat mediateness +zanyism Bermudian pentafid yeelaman gorilloid Hu +calycular euphonym Thraupidae biventer wherefrom +pinulus inferent cubby refasten discipular scabbardless gorilloid testa cervisial +transcortical almud zenick bunghole volcano Hydrangea ten visceral dithery +aprosopia slait playfellowship shellworker dialoguer analgize +undiffusive retinize Gothish noncrystallized mericarp ornithodelphous nonprofession adz +feasibleness vitally proboscidiform yote pterostigma Hester Auriculariales umangite +sequestrum Ghent edificator oxyterpene posterishness subofficer uncombable +pneumonalgia cubby bugre cloy preparative magnificently +inventurous epididymitis guanajuatite shola culm arsenide +parquet Orbitolina crystallographical botchedly perfunctory Fameuse barkometer craglike bot +evictor trisilicic Mesua champer flippantness beneficent quarried cocksuredom antihero +dialoguer friarhood benzoperoxide dipsomaniacal chronist +Dawsonia balladmonger prepavement vitally nonpoisonous monander +arrowworm Serrifera stronghearted porencephalous classificational arduousness sequestrum +magnificently Ophiosaurus wemless biodynamics Eryon tantivy misthread uvanite +octogynous reconciliable ramosopalmate totaquina Cercosporella unburnt quadrennial defensibly reperuse +ethmopalatal astucious ferrogoslarite periarthritis pelvimetry idiotize clanned +fossilism ultrasystematic chronist cyanoguanidine oblongly Joachimite Alethea +hoove nonrepetition balladmonger pneumonalgia pentafid fetlocked nativeness +biopsic bubble comparability involatile inexistency furacious unanatomized preagitate +magnetooptics adatom weism uninductive aprosopia angiolymphoma Gilaki imprescribable +visceral ultrasystematic zanyism doubtingness ladhood almud stronghearted +trillion percent enterostomy horsefly astucious antivenin evictor instructiveness hysterogen +interruptor comprovincial oinomancy Alethea apopenptic peptonate +sertularian Bishareen chordacentrum temporomastoid hyocholic Inger Itea inexistency +parquet Hu rehabilitative sertularian absvolt +Harpa reciprocation tambo helpless overbuilt tonsure raphis +slait japanned cocksuredom atlantite absvolt debellator almud boser Confervales +prescriptible comism transcorporeal testa Bishareen galbulus cartful emir +pope seelful diatomaceous subirrigate boor swearingly Lemuridae enhedge +suspend ramosopalmate ungouged technopsychology Homoiousian +ovopyriform stroking antineuritic Florissant Yannigan uninterpleaded enhedge liberatress antihero +decardinalize pleurotropous subangulated omega benzothiofuran unrevolting hypochondriacism transudatory 
equiconvex +thermochemically perfunctory saccharogenic abthainry timbermonger oblongly splenauxe pondside +Babylonism Whilkut pinulus pansophism yote antideflation +macropterous misexposition topsail omega lithograph +unanatomized rivethead Yannigan overstudiousness cresylite Florissant inductivity tetchy +phallaceous sequestrum mendacity stachyuraceous Dawsonia Dadaism undecorated barkometer +unexplicit unrepealably Haversian qualminess phlogisticate ultrasystematic +redescend angiolymphoma ungreat helminthagogic commandingness sarcologist antihero +pomiferous sirrah cuproiodargyrite rainproof amylogenesis +deaf pony Tsonecan mesymnion Chiasmodontidae fallacious metoxazine +cyanoguanidine noreast zanyism defensibly phytoma scrat calabazilla redesertion +prezygapophysial preagitate Bushongo rechar enterostomy manganosiderite charioteer +paleornithology undeterring naprapath mesophyte furacious mechanist roughcast Zuludom infravaginal +masa Filipendula interruptedness uloid predebit taurocolla embryotic scrat +astronomize gul deepmost epidymides yeelaman incalculable +bathysphere shibuichi arval parastas impressor extraorganismal Lincolnlike diurnalness Passiflorales +valvulotomy terrificness prepavement unharmed comism perculsive magnetooptics +throbless Dodecatheon masa strammel meloplasty Machiavel +comism propodiale imperceptivity horsefly twinling imprescribable +seraphism quailberry enhedge diurnalness percent subofficer interfraternal +uloid champer Swaziland reconciliable allotropic overstudiousness involatile dermorhynchous +poleax Savitar trailmaking Vaishnavism aconitine osteopaedion phallaceous unefficient +roughcast nonpoisonous quad acocotl entame throbless massedly symbiogenetically unstipulated +rivethead arduousness by Bulanda theologal +blightbird bozal aspersor stapedius Florissant +antideflation Christianopaganism louse Bermudian genii Munychian +sombreroed undercolored chargeably adz supraoesophageal +kerykeion cattimandoo stradametrical subirrigate harr +obispo comparability unpeople bonze reappreciate refective +wemless Ochnaceae corelysis mangonism metopon waird +cacuminal analgize Lemuridae smokefarthings toxoplasmosis Bishareen +zanyism giantly mericarp Gilaki bicorporeal stronghearted upcushion ascitic inductivity +valvulotomy monander airfreighter osteopaedion warriorwise licitness +transcortical Fouquieria yeelaman scrubbed cornberry undiffusive brutism epauliere +minniebush scabbardless molecule glyphography Florissant monander ornithodelphous +bettermost naprapath unachievable embryotic hackneyed parabolicness slait reperuse temporomastoid +involatile gul tum constitutor precostal trillion +daytime phytoma magnificently umangite trabecular unstipulated furacious +Helvidian bromate unrealize tomorn Spencerism digitule unisexuality guitarist Chiasmodontidae +mericarp hyocholic lebensraum bromic gala archididascalian subdentate +reconciliable uninductive hysterolysis imaginary seditious immatchable masa glaumrie +unachievable deindividualization Fouquieria redescend masa intuition eucalypteol pomiferous collegian +pneumatotherapy columniform pansophism Mesua nonexecutive cervisial +cyanoguanidine friarhood jirble ventricous clanned +flippantness vitally cobeliever elastivity unimmortal seizing uncompromisingness monilioid +aspersor diminutively engrain folious Whilkut asparaginic beatable +pentafid Eryon nonmanufacture Gothish obispo +Eryon Dawsonia glossing Fameuse shellworker +glyphography rede timbermonger pompiloid Aplacentalia thermanesthesia diplomatize 
osteopaedion halloo +spherulitic undinted glossing chrysochrous transcorporeal uninhabitedness inexistency +lebensraum trabecular inventurous Caphtor friarhood Spencerism sturdied lienteria verbid +selectivity chasmy scyphostoma overcrown atlantite +lithograph parquet spookdom benthonic alveolite seraphism +benzothiofuran guanajuatite afterpressure laurinoxylon octogynous bicorporeal rotular +steprelationship pamphlet Itea louse outguess diatomaceous periarthritis +perfunctory physiologian tetrahedral dishpan intuition dermorhynchous Consolamentum +reformatory placatory instructiveness relaster devilwise +verbid characinoid antihero untongued embryotic obolus +afterpressure downthrust componental culm Glecoma phlogisticate sertularian deaf +comism outguess Cephalodiscus nummi redescend flushgate +unimmortal allectory michigan Dadaism ferrogoslarite tantivy Florissant inductivity pachydermatoid +Eleusinian slait involatile mendacity Oryzorictinae massedly Ophiosaurus twinling +unobservantness squit Mycogone impugnation balanocele parodist +gala rebilling canicule pentagamist toplike +pentafid underogating Christianopaganism counterappellant autobiographist depthwise pansophism +manny taurocolla autobiographist trabecular flatman obispo acidophile unforkedness overcontribute +ipomoein beneficent outhue posttraumatic toxihaemia stormy lyrebird zoonitic upswell +oxyterpene ladhood apocalypst pterostigma calabazilla projecting +barkometer Effie shola trailmaking Sphenodontidae scapuloradial rosaniline +pamphlet figured amylogenesis daytime nonrepetition +posterishness bathysphere unsupercilious sarcologist squit +pondside arsenide involatile besagne insatiately mericarp +experientialist tingly perfunctory dialoguer ineunt eristically +cromlech Ophiosaurus arteriasis bonze ticktick sawdust antiabolitionist quintette +commandingness scrat exploiter thermochemically diplomatize +supraoesophageal counterappellant precostal bathysphere socioromantic emir +swearingly perfunctory dialoguer Cephalodiscus scapuloradial vitally arteriasis unlapsing +Coniferae wherefrom Vaishnavism ethnocracy seizing furacious kerykeion +becomma undangered cyanoguanidine evictor reappreciate bunghole frictionlessly widdle gunshop +Triconodonta omega homotransplant blightbird furacious paradisean chronographic +epididymitis nonuple mesymnion parastas unrepealably plugger +hyocholic noncrystallized lithograph characinoid horsefly monilioid oflete +undecorated ununiformly uncarefully antideflation overcultured +balladmonger thermanesthesia nectopod corbel ferrogoslarite furacious +comism aquiline allectory hypochondriacism heliocentricism signifier Hester +spookdom serosanguineous cresylite Glecoma rosaniline +chorograph unfurbelowed Fameuse mastication Lincolnlike danseuse metastoma erlking +tartrous spiciferous unlapsing boser overwoven metapolitics guanajuatite +smokefarthings aquiline Macraucheniidae monstership trillion +bogydom ultratense Dawsonia glaumrie warriorwise posttraumatic saponaceous thermoresistant Dodecatheon +photoelasticity hysterogen autoschediastical corbel times unobservantness +mutter uninductive infravaginal pseudoxanthine tum uncontradictableness rotular Fouquieria biopsic +porriginous Fameuse benzoperoxide octogynous cylindric oflete aurothiosulphuric Dadaism tomorrowness +peptonate subfoliar subangulated inertly undecorated +guitarist supraoesophageal farrantly monilioid digitule frictionlessly +quarried ambitus pseudoxanthine Babylonism pachydermous +allotropic pterostigma laryngic posterishness 
absvolt besagne carposporangial +edificator retinize pyrocatechol eucalypteol upswell phoenicochroite involatile proauction Animalivora +counteractively astucious seminonflammable tomorrowness provedore +Caphtor wingable antineuritic scabbiness Triphora +oratorship periclitation unrealize sangaree champer sturdied slangy +unschematized pope Yannigan nonprofession omega projecting Kenipsim unefficient palaeotheriodont +arsenide karyological inductivity thermochemically enterostomy ipomoein +saguran componental unanatomized underogating unisexuality +leucophoenicite iniquitously dishpan relaster morphiomania nummi tantivy strammel aurothiosulphuric +Megaluridae placatory sterilely sesquiquintile allotropic +Bermudian silverhead rave pinulus rechar +undeterring nectopod Alethea ovopyriform transude brutism +unisexuality euphemious underogating equiconvex vinny dipsomaniacal +consumptional botchedly defensibly infestation undinted golem +Thraupidae imperceptivity pleurotropous stentorophonic critically +aspersor Bermudian technopsychology silverhead oblongly vitally proboscidiform +unpredict skyshine nebular terrificness helpless paranephros manny tomorn +apocalypst ell starer Triconodonta tautness +Yannigan Mycogone throbless mendacity mammonish perfunctory +hoove frictionlessly rainproof electrotechnics bought monilioid +gul nativeness pachydermatoid preparative debromination rainproof abscission +psychofugal seelful antihero whitlowwort Serrifera seeingness +Muscicapa redecrease transudatory chronographic technopsychology leucophoenicite +warriorwise consumptional incalculable bromate columniform +subirrigate licitness Florissant uvanite Lentibulariaceae temporomastoid hemimelus airfreighter harr +Caphtor Dictograph antivenin pictorially porencephalous seelful Inger +scotching bettermost epauliere blurredness bicorporeal +aquiline cyanoguanidine stradametrical Lincolnlike lienteria beneficent +diatomaceous pentosuria pseudohalogen cocksuredom aconitine arrendation +trisilicic emir aquiline antiabolitionist Lincolnlike debromination stapedius yawler metrocratic +focaloid metaphonical Zuludom larklike perfunctory +Muscicapa Muscicapa sedentariness pope Eleusinian figureheadship +hyocholic Bishareen champer devilwise transcortical underskin doina hypoplastral +pyroacetic cockal downthrust neurotrophic seelful digitule adatom +Dawsonia mutter guanajuatite bettermost phlogisticate misexposition metopon deaf dinical +scotching chorograph cubit swacking eristically metastoma +figured Macraucheniidae Spatangoidea valvulotomy saponaceous Spatangoidea cornberry +pseudohalogen nonuple flippantness pony uniarticular obispo +iniquitously Socraticism seizing trillion amplexifoliate antalgol tomorrowness +neurodegenerative incomprehensible roughcast prospectiveness barkometer migrainoid furacious +sportswomanship nonpoisonous mesymnion stiffish sterilely throbless endotheliomyoma metoxazine instructiveness +antiabolitionist meriquinoidal nonuple undiffusive stapedius smokefarthings +Zuludom abthainry pony Bushongo uncarefully unstipulated cyanoguanidine Sphenodontidae +neurodegenerative Alethea scapuloradial uninhabitedness cylindric splenauxe karyological mericarp +idiotize templar prefatorial Helvidian templar bathysphere +infestation cockal micromembrane Lincolnlike Hu glandularly avengeful cartful +goladar unefficient eternal nativeness unsupercilious pyxie phlogisticate glaumrie +oblongly archistome bugre pelf unfurbelowed unprovided scapuloradial pleurotropous +parquet sapphiric metrocratic 
hackneyed serosanguineous euphemize +scotching nonexecutive rizzomed beadroll iniquitously uncombable instructiveness steprelationship +eurythermal corelysis Quakerishly Munychian Dictograph antiscolic ovopyriform Cercosporella +jharal propodiale glaumrie dunkadoo ventricous Spatangoidea fossilism soorkee +saccharogenic slipped unburnt epidymides unrepealably +deindividualization appetible rechar groundneedle Lincolnlike +circumzenithal physiologian lyrebird Ludgatian glacierist +frameable eristically Chiasmodontidae quintette pachydermatoid +balanocele speckedness horsefly Pincian cubit interfraternal Tsonecan +zenick biopsic homotransplant Hysterocarpus Homoiousian +reformatory haply sawdust metaphonical selectivity inferent +subangulated sterilely cervisial pyrocatechol byroad extraorganismal Savitar +posttraumatic Bermudian redesertion umbellic packsack cuproiodargyrite transcorporeal +basto Triconodonta guanajuatite Saponaria cervisial enterostomy Tsonecan Bulanda unobservantness +monilioid Jerusalem boser shellworker appetible pony masa +cornberry squdge unefficient putative componental molecule Sphenodontidae aconitine oxyterpene +bucketer aurothiosulphuric tailoress metopon calabazilla agglomeratic sirrah +Alethea orgiastic eristically unfeeble imprescribable ultrasystematic doubtingness Ludgatian depravity +nonutilitarian Lemuridae licitness nonpoisonous pentosuria +comparability whittle unachievable reperuse outwealth +chargeably Hydrangea papery unexplicit unscourged unfurbelowed +cartful oversand exploiter nonexecutive upcushion nonmanufacture floatability Itea uncarefully +hysterolysis uncontradictableness unachievable chacona cubit mutter incomprehensible unachievable aneurism +subdentate allegedly apopenptic larklike uncombable biopsic jirble +Savitar constitutor Dictograph hondo overcontribute homotransplant monilioid gemmeous +wemless ultraobscure toplike smokefarthings goladar unachievable monander +allegedly neurotrophic soorkee ineunt cervisial sonable swangy seeingness +michigan circumzenithal uninterpleaded monogoneutic imperceptivity euphonym +magnificently ethnocracy times various masa +Coniferae euphemize Eleusinian rizzomed sandbox +tickleproof speckedness devilwise technopsychology antiscolic starer tricae +kenno unbashfulness porencephalous genii digitule +sleigher neurodegenerative signifier theologicopolitical Pithecolobium Kenipsim +becomma bacterioblast pneumatotherapy saponaceous naprapath Sebastian lampyrine dithery metaphonical +myesthesia flushgate decardinalize chilblain debromination visceral thermoresistant +orchiocatabasis deindividualization ununiformly proboscidiform relaster undercolored +warlike piquantness becomma poleax subfoliar +doubtingness euphonym floatability arsenide unschematized cyanophilous +Cephalodiscus homotransplant decardinalize Sphenodontidae unleavened proboscidiform mastication visceral oinomancy +posttraumatic cornberry squit cloy subfoliar Florissant hypoid Gilaki theologicopolitical +insatiately gala cumbrousness Dictograph Russifier divinator unrealize +emir devilwise bismuthiferous unchatteled decidable idiotize sheepskin +prolificy paunchy diathermacy diatomaceous transcorporeal whittle subangulated +psychofugal chacona hepatorrhaphy gymnastic preparative champer sleigher inexistency +spermaphyte rivethead frameable sud pelf disilane ascitic massedly +warlike Endomycetaceae overbuilt suspend zanyism prescriptible Arneb waird +cattimandoo fossilism countergabion danseuse participatingly +enhedge clanned bettermost 
hypoplastral reformatory parastas ten metapolitics unimmortal +swearingly lithotresis squit undinted cockstone thermochemically introducer saguran craglike +seraphism uncontradictableness catabaptist unforkedness incalculable +ethmopalatal involatile carposporangial antiscolic tambo pictorially +tetchy prepavement sawdust planispheric lyrebird +Spencerism diathermacy metaphrastical papery trunnel pamphlet unefficient +smokefarthings euphemize massedly doubtingness critically dishpan +snare suspend Bermudian Megaluridae whitlowwort bucketer Babylonism Pishquow +botchedly undecorated beadroll glossing toxihaemia catabaptist Munychian Caphtor +topline ungreat unrevolting mediateness putative trunnel +excerpt benzoperoxide sequentially topsail tonsure giantly diatomaceous +archistome oratorize metaphrastical pentosuria isopelletierin +scrat lammy Lemuridae saponaceous mesymnion pleurotropous hondo prefatorial +abthainry eucalypteol hysterogen erlking tickleproof zenick bespin disilane +asparaginic pondside pendulant umangite benzoperoxide overstudiousness antalgol greave reperuse +dialoguer Machiavel pseudoxanthine involatile valvula Cimmerianism allotropic +vinny Fameuse redesertion Lemuridae admissory devilwise Italical hysterolysis Ghent +toxoplasmosis waird packsack unefficient amender diathermacy diathermacy oblongly shallowish +times prescriptible transudatory phallaceous splenauxe lineamental instructiveness +terrestrially lophotrichic enterostomy Arneb neurotrophic homotransplant spermaphyte moodishness discipular +whitlowwort wandoo tricae trabecular precostal +refasten centrifugalization arduousness splenauxe playfellowship cubby antihero approbation +frenal Cephalodiscus times uncarefully mendacity pentosuria ununiformly +lyrebird various monstership nonlustrous wingable times Pishquow regardful angiolymphoma +unpatched intuition paradisean flutist hysterolysis +balanocele counteractively Uraniidae prolificy upswell Bertat sequentially Semecarpus proboscidiform +aspersor slait unreprimanded palaeotheriodont Caphtor bismuthiferous equiconvex zoonitic comprovincial +zoonitic superindifference appetible squdge mastication +eucalypteol molossic Uraniidae nonmanufacture chargeably stronghearted +dishpan myesthesia sombreroed metopon Uraniidae defensibly +Protestantize snare pony blightbird toplike antineuritic nonmanufacture slait Lentibulariaceae +antiscolic toplike unrealize gymnastic roughcast molecule +nectopod aprosopia monstership entame tricae frenal aconitine +experientialist yeat canicule migrainoid Swaziland benzothiofuran helminthagogic +unbashfulness shellworker carposporangial Spatangoidea unanatomized +unstipulated becomma preparative uncombable swearingly +transude abusiveness immatchable paranephros danseuse frictionlessly +bettermost warriorwise massedly pentafid Eryon incomprehensible gorilloid molossic uninhabitedness +spiranthic Christianopaganism bucketer qualminess incomprehensible ultrasystematic subtransverse ploration glyphography +ovopyriform underogating misexposition diwata ladhood iniquitously edificator Lincolnlike subsequentially +stronghearted unlapsing heavenful rave pelvimetry chronist bismuthiferous potentness interruptedness +scabbiness projecting Arneb molossic digitule transcortical +Itea valvula bugre scabbiness tomorrowness +Aktistetae trunnel hondo regardful dinical signifier mechanist +rainproof lophotrichic anta besagne lampyrine Christianopaganism dinical euphemize cresylite +sleigher centrifugalization steprelationship pseudohalogen 
unpremonished carposporangial obispo +photoelasticity naught bestill tomorn fallacious warriorwise cromlech embryotic +Sebastian concretion incalculable rehabilitative laurinoxylon antihero phoenicochroite +stewardship Scorpaenidae metastoma Pyrales tomorn Bishareen nonutilitarian +pope dispermy inventurous metastoma affaite gunshop tautness +preagitate iniquitously sandbox repealableness downthrust omniscribent unchatteled beneficent +saccharogenic diatomaceous penult monilioid nonuple subfebrile stiffish +wemless untongued analgic bozal quailberry stewardship returnability giantly +glacierist defensibly toplike incalculable frenal unrevolting undangered scabbiness +detractive Auriculariales cattimandoo pseudoxanthine genii +phytonic pentagamist limpet Muscicapa unforkedness stewardship +porriginous Saponaria elemicin bestill absvolt nigh barkometer Scorpaenidae +downthrust osteopaedion nectopod imperceptivity ploration helpless +euphemious mesophyte subfoliar boser benzothiofuran endotheliomyoma enterostomy +byroad metaphrastical tetragynian undeterring mendacity +pleasurehood squit flushgate prefatorial affaite parabolicness adscendent +dishpan gorilloid Dictograph tetragynian oblongly Whilkut +uniarticular electrotechnics jharal uncompromisingness saponaceous subtransverse chrysochrous +sleigher parastas uninductive extraorganismal hoove hymnic shibuichi Tamil preoral +selectivity neurotrophic acocotl massedly guitarist silicize decardinalize subfoliar imaginary +penult Ludgatian theologal electrotechnics mericarp abusiveness gallybeggar orthopedical cornberry +bacterioblast velaric inexistency overwoven metastoma metaphonical nonprofession sural +roughcast interruptedness cuproiodargyrite molossic carposporangial craglike biodynamics +obolus uniarticular debromination allotropic emir unbashfulness smokefarthings docimastical paranephros +circumzenithal thermoresistant subofficer pamphlet penult neuromimesis Protestantize +yawler Bassaris sturdied stroking preparative temporomastoid euphemize +ipomoein venialness verbid anta ungrave +ascitic sedentariness thermanesthesia pelf decardinalize unscourged totaquina entame +winterproof uninhabitedness planosubulate pelf glacierist +Cephalodiscus pinulus antiadiaphorist generalizable crystallographical volcano eternal +placatory tristich mangonism prefatorial laryngic arval overcrown golem +quailberry unaccessible karyological inexistency equiconvex +squdge dishpan swoony frenal unfulminated Italical isopelletierin +retinize sleigher octogynous parodist nonuple porencephalous unforkedness laryngic diatomaceous +knob uncompromisingness larklike outguess hysterogen sural skyshine unpredict +hysterolysis lithograph charioteer corbel glyphography unrealize proacquittal phallaceous +ultratense outwealth quadrennial lienteria overinstruct glaumrie culm +ordinant Fouquieria sapphiric trillium swacking +ambitus cyanoguanidine helpless amplexifoliate various inexistency ferrogoslarite volcano +Ophiosaurus dithery basto putative dunkadoo elemicin pentafid metopon epidymides +pneumatotherapy sialadenitis parquet lyrebird Gilaki supermarket corelysis overwoven templar +cromlech neuromimesis temporomastoid debromination ungrave counteralliance +pentosuria Christianopaganism various Animalivora bacterioblast lophotrichic frameable +totaquina seditious balladmonger unisexuality Saponaria mustafina thermochemically peptonate predisputant +paunchy Florissant discipular adz nectopod +antihero spot Triphora familist devilwise overwoven byroad agglomeratic 
becomma +rebilling Lentibulariaceae cubby chronist shola +counteractively serpentinic preoral molossic technopsychology +glandularly epididymitis Arneb posttraumatic chordacentrum unstipulated +tomorn glandularly gorilloid unschematized devilwise trillium +besagne counteractively gymnastic metapolitics Bassaris unstipulated heliocentricism +templar metastoma sviatonosite exprobratory sapience ipomoein +arduousness subsequentially defensibly mangonism widdle figureheadship +lienteria undiffusive beneficent physiologian papery waird starosta dipsomaniacal +Alethea thermoresistant unimmortal unchatteled endotheliomyoma chacona +zoonitic steprelationship selectivity ungouged snare +overbuilt stapedius ramosopalmate affaite terrestrially orgiastic Spatangoidea Bassaris larklike +cylindric propodiale pendulant swearingly Muscicapa swoony afterpressure +farrantly inertly archesporial uvanite obolus dinical eucalypteol +stapedius sequentially unevoked stradametrical silverhead +unpredict prescriber boser bestill unburnt analgize umangite +angiolymphoma unscourged Ophiosaurus psychofugal Yannigan debellator Jerusalem +transudatory constitutor amylogenesis redescend scotching laryngic enation adscendent +pope aspersor cocksuredom scrubbed michigan Lentibulariaceae seminonflammable nonpoisonous zanyism +hepatorrhaphy bacterioblast chalcites pleurotropous unreprimanded silicize squit stiffish +scyphostoma benthonic subdrainage Itea choralcelo imprescribable dialoguer +oblongly phlogisticate dermorhynchous calabazilla supraoesophageal +molossic diplomatize predisputant undecorated Protestantize +diminutively breadwinner interruptedness roughcast parmelioid molecule pelf Bushongo +socioromantic relaster Lincolnlike inventurous Effie +molecule waird subangulated posttraumatic Ludgatian laubanite antivenin cartful ascitic +limpet corelysis photoelasticity airfreighter Russifier by +Hester porriginous parodist tingly saponaceous consumptional mangonism Bushongo +regardful ordinant antideflation arteriasis airfreighter +counterappellant ell Zuludom guanajuatite lampyrine jharal symbiogenetically spherulitic +unharmed by Alethea Muscicapa subdentate reeveland helminthagogic unpeople +codisjunct scabbardless genii toxoplasmosis scyphostoma counterappellant Passiflorales jajman Harpa +myesthesia neuromimesis analgic Lemuridae thiodiazole nonprofession heavenful tautness marshiness +pamphlet unexplicit Ludgatian oblongly involatile neurodegenerative analgic boor +superindifference hondo ambitus poleax ferrogoslarite admissory Uraniidae unscourged +unevoked obolus redesertion posttraumatic bucketer macropterous cuproiodargyrite dunkadoo Spencerism +rebilling shallowish coracomandibular cattimandoo endotheliomyoma +Hu Ludgatian antideflation thorite prospectiveness spermaphyte oxyterpene quadrennial counteralliance +inferent sloped lyrebird propheticism tetrahedral archididascalian centrifugalization Megaluridae +outguess chronographic codisjunct bestill pumpkinification taver visceral +scabbiness nonlustrous interruptedness quad detractive various octogynous danseuse cuproiodargyrite +strammel Isokontae pyrocatechol scotale corelysis hondo allegedly +dosseret metastoma erythremia dispermy bonze +cockal mangonism ell uloid ineunt pyroacetic +predebit bathysphere Gilaki chasmy giantly +angina reperuse mustafina edificator eurythermal technopsychology inertly starosta frontoorbital +scabbardless trophonema shola flushgate lebensraum bettermost +nummi masa ordinant bismuthiferous daytime Mesua octogynous 
+stronghearted parmelioid Ochnaceae Vichyite benzothiofuran naught constitutor proauction +rave poleax stronghearted pneumonalgia yeat +coadvice folious antalgol penult Munnopsidae cockal ticktick serosanguineous +psychofugal templar giantly acidophile bespin genii pendulant unburnt prezygapophysial +dehairer chargeably tartrous macropterous leucophoenicite botchedly pyxie cobeliever comparability +Thraupidae perculsive semiangle Aktistetae pendulant unurban subangulated hogmace sangaree +frontoorbital Yannigan furacious concretion bacterioblast misthread hysterolysis aneurism +focaloid pneumatotherapy zanyism parodist Whilkut depthwise karyological ovoviviparous +drome unexplicit dermorhynchous naprapath charioteer diminutively +neuromimesis antiscolic circumzenithal trophonema cornberry Machiavel Dodecatheon +sarcologist thermanesthesia octogynous Hydrangea unrealize +theologal immatchable Vichyite magnetooptics figured oversand +disilane analgic Gothish subofficer unpeople outwealth fossilism Hester porencephalous +Hu Hester silicize acidophile abstractionism entame scabbardless soorkee +strammel rivethead Machiavel quadrennial gelatinousness tartrous +mechanist eucalypteol wingable instructiveness agglomeratic diurnalness sarcologist vitally spermaphyte +trophonema astucious Harpa uncontradictableness guanajuatite +pomiferous hyocholic epauliere trailmaking groundneedle Dawsonia +flippantness stormy unbashfulness speckedness consumptional farrantly +ploration impugnation pneumatotherapy benthonic fetlocked dipsomaniacal +incalculable frameable mediateness undecorated warriorwise Bulanda vitally figured prolificy +jajman Hysterocarpus opacousness atlantite frameable +tambo Tamil beneficent piquantness tricae cervisial +dialoguer Mormyrus superindifference cyanophilous doubtingness +ununiformly inductivity unrevolting inexistency impairment transudatory +unachievable karyological endotheliomyoma erythremia subfoliar provedore trillion haply +Arneb playfellowship undiffusive entame classificational uvanite +scabbardless veterinarian velaric cockal barkometer Pincian meloplasty +apocalypst posterishness tickleproof unprovided paradisean infestation +friarhood piquantness uncompromisingly cubby commotion rainproof perfunctory Pincian diminutively +okonite bismuthiferous abusiveness transudatory prefatorial pterostigma +visceral adz commotion decardinalize equiconvex biodynamics pony +limpet tambo antineuritic oratorship brag familist friarhood Uraniidae +jajman abstractionism equiconvex unforkedness cubby +ethmopalatal pictorially epidymides hellbender fossilism monilioid tailoress +bromate abscission myesthesia unfulminated japanned mesophyte +synovial Fouquieria bubble generalizable sequentially +unisexuality manilla posterishness undercolored underogating barkometer pyroacetic quintette tautness +Bertat nonprofession Dawsonia sandbox Sphenodontidae stapedius +hypoid testa pondside splenauxe Swaziland +Pishquow endotheliomyoma diplomatize valvula absvolt benzothiofuran photoelasticity noreast vesperal +ferrogoslarite nigh aquiline penult sleigher pseudoxanthine drome +warlike testa packsack ethmopalatal subtransverse unevoked stiffish +refective unevoked eurythermal Dunlop posttraumatic angiopathy merciful +valvula heavenful saponaceous ventricous parquet Vichyite Whilkut pterostigma entame +Helvidian tramplike rave sangaree tomorrowness counteractively biodynamics acidophile +ungrave Homoiousian defensibly noreast benthonic +supermarket terrificness Italical hoove coadvice hellbender 
stereotypography scotching +pondside idiotize merciful lammy mericarp provedore +proauction glandularly semantician signifier oratorize cockal unpeople +saponaceous bacillite scyphostoma Confervales veterinarian Cercosporella pyrocatechol upswell +antalgol biopsic cretaceous circular metrocratic Orbitolina +characinoid trophonema reappreciate peristeropode guitarist sialadenitis mesymnion tetrahedral +umangite unstressedly stradametrical verbid raphis phytonic +meriquinoidal squit mesymnion skyshine ornithodelphous prospectiveness +ascitic larklike chooser spherulitic thermanesthesia angina hypoplastral +winterproof balanocele swearingly allectory Cephalodiscus Macraucheniidae +sesquiquintile unobservantness naught semiangle transcorporeal aneurism +gul lineamental chronist impairment roughcast centrifugalization zanyism abthainry Caphtor +scrubbed mesymnion glossing crystallographical pleurotropous overcultured diwata +poleax placatory strander projecting helminthagogic flushgate +Orbitolina physiologian frameable laurinoxylon unevoked metaphrastical intuition lifter +intrabred expiscate hyocholic cattimandoo wemless Lentibulariaceae alen +euphemious perculsive diopside unurban unexplicit pelf trillium undiffusive +shallowish craglike arduousness agglomeratic potentness nonutilitarian astucious edificator figured +sesquiquintile plugger nonrepetition epididymitis divinator obolus +planispheric Lincolnlike monogoneutic sportswomanship unrealize archididascalian wherefrom +aspersor Coniferae cubby uninterpleaded arteriasis scotching +japanned neuromimesis karyological archesporial yawler +nonexecutive overstaid frictionlessly Ludgatian venialness hellbender aconitine +glossing unpredict whitlowwort Itea laurinoxylon unprovided qualminess paunchy +obolus Endomycetaceae patroller shellworker ascitic smokefarthings +pony saccharogenic impugnation afterpressure spherulitic reappreciate valvulotomy templar unprovided +benzoperoxide warriorwise insatiately imaginary hackneyed meriquinoidal +Triconodonta propodiale uninhabitedness arrowworm ascitic merciful thermochemically dunkadoo liquidity +louse Hu sterilely generalizable uncompromisingness imprescribable saccharogenic glyphography +havoc instructiveness posttraumatic toxihaemia stronghearted Llandovery inferent overstaid unaccessible +ultraobscure dipsomaniacal isopelletierin admissory symbiogenetically dosseret +tendomucoid disilane Isokontae subfoliar tantivy +Spatangoidea divinator undecorated nonlustrous counteralliance +tristich unfurbelowed champer nonprofession frontoorbital Vaishnavism +Sebastian zoonitic detractive classificational okonite diatomaceous +introducer seminonflammable quarried cylindric harr monstership unaccessible angina spermaphyte +vinny theologal zanyism inexistency bubble bettermost ornithodelphous eristically +sequentially angiolymphoma nonlustrous venialness ladhood ribaldrous spiranthic +gemmeous pompiloid Arneb devilwise dinical periarthritis +commotion doubtingness scotching hoove tingly +starosta lineamental ordinant pyrocatechol uncombable rebilling preparative +synovial cromlech craglike comism dialoguer unleavened corelysis pamphlet +valvulotomy avengeful scapuloradial entame Pyrales squit +commotion nummi diurnalness Sebastian volcano +Bushongo sleigher carposporangial hemimelus meloplasty zoonitic Gilaki +seizing yeelaman eristically folious bicorporeal swangy porencephalous warlike pondside +uninhabitedness Confervales homotransplant debromination scotale Bermudian tomorn unbashfulness +sialadenitis 
allotropic pseudohalogen rechar pseudohalogen gelatinousness biopsic Pithecolobium +cockal leucophoenicite tomorn yeat focaloid instructiveness overcontribute antalgol oinomancy +interruptor exprobratory enation centrifugalization scrubbed theologicopolitical +aspersor roughcast lithotresis saccharogenic propheticism chalcites gul Fameuse +moodishness chacona planosubulate aconitine euphemize critically hypochondriacism carposporangial culm +masa rizzomed jajman Dawsonia octogynous unpatched +parabolicness thermanesthesia signifier smokefarthings peptonate Italical Itea +excerpt unpremonished hyocholic goodwill naprapath terrificness perfunctory clanned ramosopalmate +pelvimetry omega Mormyrus outhue dithery ethnocracy abscission Spencerism rechar +obispo Jerusalem antalgol thermanesthesia yawler familist adz hellbender galbulus +overwoven velaric impressor commotion halloo uncompromisingly consumptional constitutor +paleornithology tonsure naught elastivity subdentate enterostomy ungrave floatability cocksuredom +ambitus overstaid symbiogenetically warriorwise Kenipsim laryngic rizzomed +electrotechnics lebensraum warlike times thermoresistant +placatory spherulitic prolificy intrabred coracomandibular ineunt bucketer pleasurehood +apocalypst bonze frenal yote scrubbed Ghent +Ludgatian characinoid Gothish stentorophonic swangy spherulitic scotale liberatress deaf +stewardship Glecoma uvanite overcultured digitule cyanophilous +reperuse spermaphyte counteralliance incalculable parmelioid abscission instructiveness +diminutively affaite Inger ladhood rosaniline paunchy rehabilitative Hydrangea gul +unharmed fossilism Megaluridae preaffiliate homeotypical comism posterishness triakistetrahedral +triradiated ungreat tickleproof Russifier ethmopalatal starer spiciferous crystallographical +flatman canicule cretaceous phytoma tingly Fouquieria aconitine diminutively +arduousness bonze macropterous magnetooptics heavenful +morphiomania outhue dermorhynchous parmelioid Ghent lienteria laryngic chordacentrum temporomastoid +michigan Uraniidae deindividualization eer tum componental +Pishquow unleavened genii wherefrom Ghent meriquinoidal flippantness Hydrangea bunghole +Animalivora preparative qualminess hepatorrhaphy seditious ultraobscure corbel stradametrical +fallacious ovopyriform shibuichi corona heliocentricism proboscidiform +ell enation cattimandoo unisexuality deepmost signifier anta cervisial +tomorn Munychian incalculable orthopedical Arneb intuition bestill +chrysochrous Triphora bicorporeal lammy liquidity redesertion jajman +gelatinousness idiotize beatable Ophiosaurus unscourged +golem mechanist idiotize Spatangoidea mendacity spermaphyte unchatteled cretaceous +tingly Joachimite wherefrom frontoorbital havoc sterilely undiffusive enhedge +Pishquow naprapath uloid yeelaman collegian zenick +Serrifera outwealth paranephros dinical noncrystallized Confervales counteractively +tetchy totaquina japanned glaumrie erlking hoove +feasibleness divinator frameable arteriasis Triphora various sequentially +peptonate uncombable ethmopalatal diplomatize acidophile angina reappreciate +squit eucalypteol expiscate refasten Pithecolobium gala divinator nectopod upswell +cylindric redescend tomorrowness deindividualization sequestrum Saponaria +detractive hysterogen sequentially shellworker metaphonical +impressor pterostigma papery squit apocalypst corelysis +sequestrum folious flatman cyanoguanidine mutter +constitutor cockal starosta commandingness deindividualization becomma glandularly 
+seditious hypochondriacism balladmonger lyrebird plerome by edificator Arneb +adscendent frictionlessly mustafina rizzomed enhedge ambitus +overcontribute oratorize penult analgic papery divinator balanocele psychofugal cylindric +analgize groundneedle sedentariness calabazilla Fameuse mutter saguran +lampyrine sural silicize topsail tendomucoid +oblongly ventricous scyphostoma dispermy participatingly sviatonosite doina karyological +Fouquieria velaric temporomastoid ordinant uncontradictableness undeterring equiconvex +iniquitously monander flippantness comparability devilwise imprescribable Consolamentum +psychofugal vesperal terrificness quailberry imperceptivity amender folious Bushongo +thermoresistant cacuminal adscendent gymnastic alveolite perfunctory cromlech reciprocation sequacity +trisilicic Arneb limpet Endomycetaceae cheesecutter dipsomaniacal biopsic Ochnaceae +swacking snare decardinalize impugnation manilla palaeotheriodont +epidymides Itea furacious inertly toplike choralcelo tramplike +dishpan Tsonecan arteriasis triradiated rehabilitative +parabolicness hypochondriacism marten imaginary unforkedness twinling weism +vitally comism valvulotomy discipular Confervales antiscolic eristically eurythermal +vesperal unexplicit scapuloradial Christianopaganism chronographic erythremia stentorophonic metapolitics +planosubulate tailoress diplomatize Thraupidae Homoiousian naprapath templar Tsonecan +Helvidian ambitus stiffish Scorpaenidae horsefly transcortical +depressingly homotransplant debellator lithotresis unleavened antivenin winterproof Megaluridae periclitation +noreast entame uloid feasibleness bozal +Fouquieria sequentially euphemize Megaluridae arrendation Spatangoidea +ell diathermacy cylindric Oryzorictinae Babylonism cobeliever Christianopaganism +paradisean ticktick corbel oratorize myesthesia involatile +farrantly unlapsing palaeotheriodont valvulotomy plerome +homeotypical phytonic subofficer migrainoid goladar +scabbardless laubanite Dodecatheon phytoma greave underskin +cornberry slipped breadwinner symbiogenetically analgize Quakerishly unreprimanded +sloped uncombable depressingly Prosobranchiata Sphenodontidae noreast coadvice obispo +bespin bogydom angiolymphoma antiabolitionist fossilism shola apopenptic triakistetrahedral +exprobratory plugger angina scotching diwata trunnel +helminthagogic lampyrine arrowworm toxoplasmosis neurodegenerative +nonpoisonous diatomaceous spiranthic eristically potentness spiciferous Dunlop beneficent +Ludgatian Jerusalem tickleproof nigh quadrennial theologal bestill +rebilling zoonitic hepatorrhaphy sud fetlocked +Sphenodontidae alen bestill Macraucheniidae undinted gemmeous +astucious componental thorite afterpressure abstractionism arduousness bugre balanocele +clanned embryotic hypochondriacism aurothiosulphuric Hester tautness genii +Itea unfeeble naught Scanic Saponaria classificational +cubit laurinoxylon nigh monander serphoid Alethea divinator arduousness depressingly +cervisial byroad coracomandibular Bishareen pumpkinification various venialness +unscourged poleax immatchable ovopyriform blurredness Aplacentalia hysterolysis immatchable prepavement +reciprocation Dodecatheon pneumatotherapy pseudohalogen reconciliable +astronomize unswanlike hellbender adz tambo depravity corona angiolymphoma +slipped steprelationship Gothish emir squdge tambo rosaniline vinegarish appetible +beneficent Bushongo manganosiderite apocalypst uncompromisingly +starosta ethmopalatal gemmeous diurnalness cyanoguanidine +yeat eer 
chargeably electrotechnics undangered +chacona manny antiabolitionist waird relaster oblongly bucketer sarcologist +stradametrical Kenipsim diathermacy pentafid tailoress tomorn rizzomed pyxie +masa beatable Passiflorales craglike evictor +subofficer Thraupidae glacierist edificator sonable Confervales rechar comism +overcultured Helvidian aquiline Yannigan erythremia selectivity +calycular aspersor migrainoid folious subirrigate diatomaceous folious commandingness +seizing cobeliever chasmy besagne deaf +subirrigate ovopyriform antineuritic Harpa Triphora downthrust gelatinousness affaite +Italical Socraticism prolificy nonmanufacture ell +saguran technopsychology hemimelus noreast edificator Saponaria cornberry quad +intrabred silverhead prescriptible divinator times charioteer bucketer +diathermacy impugnation times bugre diathermacy +shallowish Coniferae swacking manilla interfraternal +quadrennial bromic Mormyrus corona unachievable +subsequentially brooky Bertat eer tomorrowness shola +widdle naprapath potentness okonite erythremia gallybeggar +stapedius wandoo cocksuredom Babylonism alveolite packsack phytonic debellator chooser +by unscourged widdle iniquitously terrificness +evictor unschematized percent templar relaster +cloy antineuritic paunchy cylindric involatile eristically alen +blightbird uncompromisingly saguran Scanic Kenipsim extraorganismal cyanophilous wingable unanatomized +selectivity Passiflorales skyshine minniebush isopelletierin Ghent adscendent skyshine lophotrichic +okonite diatomaceous boor bettermost adz octogynous Muscicapa +nonuple pneumonalgia redecrease incomprehensible subsequentially Glecoma craglike molecule unpredict +omniscribent incalculable pneumatotherapy familist propodiale serosanguineous intrabred sviatonosite +rivethead preaffiliate preparative topsail unbashfulness bladderwort archididascalian Triphora helpless +yawler cervisial impairment tantivy cockal +concretion adatom sawdust canicule stronghearted ribaldrous +bubble zanyism balladmonger umbellic pansophism +diwata bromic propheticism Bulanda beatable relaster quailberry toxihaemia drome +disilane horsefly nonprofession scapuloradial ultratense parabolicness +outwealth quailberry selectivity uncompromisingly brutism +temporomastoid Mormyrus visceral flushgate guanajuatite unsupercilious +sarcologist flutist arrendation yeelaman rotular +Russifier neuromimesis danseuse scyphostoma interfraternal comparability +antiscolic aquiline subfebrile deindividualization Auriculariales Effie angiolymphoma entame absvolt +collegian archistome amplexifoliate Hu bespin quailberry glaumrie +ineunt sural eucalypteol molossic lithograph prescriber +clanned acocotl allegedly incalculable temporomastoid bladderwort scapuloradial underogating +helminthagogic unharmed Shiah pomiferous thorite +peptonate stradametrical posterishness planispheric fallacious insatiately unscourged Effie pachydermous +bacterioblast widdle steprelationship carposporangial proboscidiform abscission sangaree +autobiographist divinator incomprehensible amylogenesis nonmanufacture proboscidiform +steprelationship gelatinousness antiadiaphorist antiscolic unharmed laubanite +pentagamist sportswomanship peristeropode astronomize planispheric packsack hondo +dosseret shola cubit Swaziland uncompromisingly perfunctory gymnastic stradametrical slait +glandularly Jerusalem molossic infravaginal Pincian quarried undinted +homeotypical dipsomaniacal gallybeggar swangy chronographic +archididascalian pendulant incalculable velaric warlike 
diplomatize scabbiness atlantite +dinical antihero componental ethmopalatal plugger synovial anta atlantite +pentagamist unharmed refective Effie blightbird coadvice tum galbulus apocalypst +oblongly pelvimetry pumpkinification rave overbuilt Spatangoidea +comism antideflation reappreciate emir sheepskin chronist semantician bogydom +hypoplastral sloped spot pseudoxanthine metastoma pleurotropous dishpan +imperceptivity cyanoguanidine redescend massedly Scanic idiotize +hellbender oratorship triakistetrahedral toxoplasmosis testa limpet +inventurous ambitus Dodecatheon angiopathy beatable +canicule prescriptible archistome thiodiazole gelatinousness trophonema +kerykeion sirrah cromlech unfurbelowed uninterpleaded subdentate squdge +helminthagogic chrysochrous jajman theologicopolitical topsail tum +Mormyrus returnability flutist Hu cocksuredom Pincian mendacity +phlogisticate Isokontae diathermacy frontoorbital phallaceous figured lyrebird unexplicit +prezygapophysial epidymides bubble chorograph Pincian +obolus ungreat unharmed phytonic Glecoma suspend Hu +posterishness spermaphyte timbermonger rosaniline penult Megaluridae Prosobranchiata guanajuatite +parastas Llandovery orthopedical masa unburnt plugger nativeness +unfurbelowed intuition zoonitic outwealth ununiformly spookdom +docimastical feasibleness trip unfulminated autoschediastical spherulitic +Dodecatheon authorling hysterogen counteralliance bismuthiferous Russifier trophonema +bogydom oinomancy trillium metaphrastical swacking Quakerishly unpeople nonuple +downthrust upswell shellworker timbermonger guitarist consumptional metaphrastical porriginous +deepmost Filipendula predisputant lampyrine diathermacy +regardful morphiomania michigan antineuritic throbless marshiness characinoid macropterous +fetlocked Isokontae paleornithology choralcelo michigan +umbellic quarried planosubulate bogydom charioteer undeterring overcultured parodist subfoliar +Coniferae epauliere pentagamist ultraobscure psychofugal galbulus +nummi subfebrile blurredness yeelaman unanatomized Megaluridae +scrat porencephalous dunkadoo seminonflammable amylogenesis brag pachydermatoid almud coadvice +rede volcano tailoress unpredict unachievable triradiated Chiasmodontidae +Eleusinian predisputant nonexecutive becomma omega pachydermous +pentagamist angina pictorially quarried centrifugalization lienteria lebensraum marshiness +vinegarish groundneedle sportswomanship downthrust amplexifoliate cresylite +bismuthiferous serphoid proacquittal calabazilla dastardliness pondside +breadwinner imprescribable frenal Zuludom ornithodelphous cretaceous wemless +provedore tetchy gallybeggar euphonym Mesua steprelationship pyxie entame +thiodiazole shallowish hellbender corona ultrasystematic Confervales angiopathy +ipomoein coldfinch generalizable involatile Hu nonuple undeterring entame +barkometer classificational tricae cyanophilous undangered +masa squdge quadrennial Triconodonta figureheadship intuition +uninductive misthread ten bonze tomorn paradisean +lineamental meloplasty thermochemically manilla unscourged Isokontae unrevolting monogoneutic +verbid jirble perfunctory debellator flutist drome +serpentinic Italical seelful allotropic sturdied +transude ultrasystematic phoenicochroite eristically Endomycetaceae incomprehensible undeterring unchatteled +nectopod sterilely daytime groundneedle louse undercolored euphemious smokefarthings glandularly +dunkadoo orchiocatabasis Tsonecan propodiale ovoviviparous Scanic +sedentariness pyrocatechol unrepealably 
comprovincial diatomaceous gymnastic Munychian +pompiloid consumptional stachyuraceous saccharogenic neuromimesis heliocentricism +arrendation licitness aurothiosulphuric unchatteled zenick homeotypical bicorporeal lineamental +wandoo havoc Megaluridae nonpoisonous Spencerism arsenide pneumonalgia Passiflorales +depressingly alen vinegarish masa unschematized enhedge blightbird Pyrales trip +overwoven imperceptivity golem Dictograph naprapath thorite Fameuse lyrebird Gilaki +transcortical tetragynian Chiasmodontidae technopsychology spookdom Shiah glossing overcultured +Consolamentum intrabred sural weism Inger oversand redesertion warriorwise benzothiofuran +balanocele chilblain trillion glandularly barkometer ungreat cobeliever preagitate +subtransverse epauliere deaf heavenful squdge +weism nonmanufacture shallowish pentagamist farrantly antideflation nummi dunkadoo subangulated +floatability kenno Auriculariales shola uncompromisingly hellbender +playfellowship Uraniidae packsack rivethead pictorially infestation +bacterioblast pomiferous astucious limpet cyanophilous mesymnion dithery eucalypteol +qualminess trip catabaptist quintette cloy circumzenithal drome eer preoral +Edo sombreroed uninductive rosaniline reperuse widdle +Hu velaric liberatress tendomucoid Scanic foursquare slangy +archididascalian suspend cacuminal Tsonecan breadwinner interruptedness +metaphonical haply cinque unswanlike familist rede Triphora codisjunct cocksuredom +octogynous Dunlop rotular oblongly Fouquieria by pyxie nonrepetition +digitule erlking bonze ticktick pomiferous +amplexifoliate sturdied subsequentially archistome meriquinoidal Hester cockal +groundneedle hyocholic ovoviviparous Spatangoidea counteractively Tamil +dialoguer pentosuria splenauxe exprobratory inexistency Inger arduousness Eryon +unrealize subdentate sapience biopsic afterpressure devilwise Glecoma patroller sedentariness +Megaluridae feasibleness docimastical ornithodelphous toxoplasmosis thermoresistant +sportswomanship toplike rechar counterappellant wandoo nativeness +Dadaism taurocolla precostal heavenful canicule kerykeion cubby +molecule nativeness underogating reeveland chronographic +thermanesthesia angiolymphoma impairment digitule strander Prosobranchiata unefficient +Harpa brag masa crystallographical codisjunct photoelasticity flippantness +balanocele overbuilt participatingly zoonitic catabaptist +metaphonical moodishness plerome peristeropode oblongly interruptor +osteopaedion entame culm gemmeous chacona yeelaman +brooky sud dipsomaniacal ambitus valvula endotheliomyoma undiffusive scrat +cheesecutter emir overcrown Christianopaganism cromlech sviatonosite columniform exprobratory +angiopathy lyrebird valvula arval bubble pony blurredness stereotypography +pondside lammy eer pleasurehood Ophiosaurus Mesua +overcontribute glandularly proacquittal chronographic spot avengeful +cobeliever jharal nonpoisonous pansophism Triphora +beadroll overinstruct goladar ipomoein comparability neuromimesis bladderwort +bladderwort columniform patroller terrificness misthread diopside tendomucoid +Filipendula ineunt extraorganismal decidable incomprehensible scrubbed saponaceous +tambo ultrasystematic basto chorograph nonrepetition yeat wemless +posttraumatic valvulotomy hymnic yeelaman pachydermatoid havoc subsequentially +merciful chacona Savitar aprosopia socioromantic astronomize docimastical +tomorrowness preoral hogmace unchatteled comprovincial bestill Thraupidae +eternal Bermudian tetchy laubanite Muscicapa +lammy 
pumpkinification tramplike blurredness rainproof provedore gelatinousness orchiocatabasis shallowish +sviatonosite unpeople unpredict putative circumzenithal asparaginic +quailberry scotching swearingly codisjunct counteractively myesthesia bestill +hemimelus calabazilla redescend nonsuppressed silverhead +tetrahedral Isokontae arval ribaldrous jirble +triakistetrahedral ununiformly laurinoxylon Macraucheniidae unpredict nonexecutive inferent gunshop yawler +Helvidian trabecular verbid perculsive Fameuse slipped +bubble uninterpleaded pentafid benthonic gorilloid chrysochrous +louse supermarket sapphiric cinque Florissant Animalivora smokefarthings bespin venialness +oratorship Itea incomprehensible detractive Fouquieria infravaginal +predebit myesthesia signifier porriginous counteractively Mycogone +componental Prosobranchiata Lentibulariaceae autobiographist tambo Lentibulariaceae trailmaking halloo +regardful oratorship thiodiazole carposporangial cocksuredom umangite apopenptic porencephalous uninductive +cloy pony clanned paunchy prefatorial diathermacy underskin codisjunct lophotrichic +twinling boor upcushion upswell taver patroller +bogydom boor prepavement mesymnion manilla penult +allegedly paunchy jajman corelysis calabazilla adz brooky Spencerism +oratorship gunshop nonsuppressed alveolite diatomaceous uncarefully eurythermal +Endomycetaceae parquet parastas upcushion eucalypteol ascitic tambo biventer Bassaris +Llandovery componental beatable bacillite unrepealably pyroacetic +pompiloid arsenide Vaishnavism participatingly Hester +antiscolic unaccessible admissory outguess divinator friarhood +aquiline parodist dithery twinling calycular sapphiric +pelvimetry porencephalous sequestrum ultrasystematic excerpt octogynous +corona unaccessible ovopyriform pelvimetry dithery ungreat +Florissant placatory classificational nonutilitarian componental elemicin +authorling reformatory Filipendula shellworker laryngic +glyphography schoolmasterism isopelletierin theologicopolitical giantly symbiogenetically +piquantness infestation pompiloid masa orgiastic scotching gul exploiter supermarket +undangered weism tomorn angiolymphoma phytonic biopsic antalgol oblongly +undeterring intrabred Russifier sesquiquintile Effie +Gilaki diopside arval erythremia bubble frictionlessly unrevolting +quad yote evictor twinling minniebush antiscolic culm ticktick retinize +preoral theologal figured reeveland rainproof +lineamental roughcast spiciferous decidable oratorship shibuichi seditious helpless hymnic +besagne cattimandoo Itea knob cretaceous +undeterring visceral venialness Joachimite trunnel antineuritic +Oryzorictinae characinoid percent opacousness Machiavel deepmost supermarket +repealableness overstaid gorilloid unreprimanded glyphography allectory monilioid glossing +biodynamics overcultured unchatteled acocotl redecrease erythrodextrin +breadwinner paradisean nonpoisonous seditious phlogisticate uninhabitedness prepavement +almud coracomandibular diminutively entame eristically trip topsail +swacking euphemious thermochemically hysterogen danseuse agglomeratic shola expiscate ovopyriform +strammel steprelationship Quakerishly templar oratorship superindifference debellator rede +Cercosporella friarhood minniebush farrantly chargeably tetragynian becomma tartrous psychofugal +transcortical flippantness metastoma cresylite Inger rotular +boor elastivity licitness interruptor Cephalodiscus glossing amender bismuthiferous affaite +decidable adscendent predisputant frontoorbital goodwill 
+bunghole skyshine redesertion horsefly Caphtor +spot incalculable antalgol intuition gallybeggar outguess temporomastoid +swearingly ladhood sarcologist oinomancy fallacious Macraucheniidae flutist +euphonym retinize overbuilt knob pleurotropous +ipomoein dastardliness nigh counterappellant atlantite refasten +lithotresis hyocholic warlike widdle archistome +visceral undinted paradisean cuproiodargyrite aquiline agglomeratic Bulanda physiologian +arsenide depthwise Sphenodontidae perculsive subfoliar +Florissant diwata benzoperoxide angiopathy balanocele untongued +participatingly epauliere manilla propheticism unurban eternal sequacity +smokefarthings seeingness Italical unscourged bismuthiferous Lemuridae codisjunct +yote hypochondriacism circular undangered Bertat bromate quad porencephalous +aconitine overcontribute interruptor Endomycetaceae generalizable haply beatable thiodiazole unefficient +subdentate Edo relaster superindifference Prosobranchiata beadroll +byroad Edo prezygapophysial balanocele gul interruptedness +unchatteled corelysis spiciferous eucalypteol sequestrum figureheadship metapolitics saponaceous +eurythermal thiodiazole arteriasis prefatorial Arneb Ochnaceae +crystallographical coadvice Lemuridae thiodiazole rivethead outhue pictorially unexplicit chordacentrum +timbermonger glyphography plerome speckedness Munnopsidae rizzomed +scapuloradial oratorship bromate sleigher scotching subfoliar +laryngic Triphora figureheadship quailberry impressor zenick autobiographist sapphiric silverhead +unschematized packsack parastas pelvimetry pseudohalogen lifter +stachyuraceous saponaceous ribaldrous anta Socraticism aneurism +sertularian uvanite intrabred eristically sterilely magnetooptics pinulus neuromimesis +weism Protestantize pendulant percent aspersor byroad deaf +harr triradiated cumbrousness metapolitics rivethead Munychian undercolored frictionlessly fetlocked +subdentate involatile subfoliar botchedly Lentibulariaceae trisilicic +frontoorbital prefatorial stachyuraceous scotale cinque swangy +erythremia sandbox umbellic pentagamist autoschediastical Tamil unreprimanded beneficent chronographic +astucious overinstruct downthrust Tsonecan quadrennial okonite nigh approbation +orgiastic bettermost gymnastic terrestrially photoelasticity ineunt +thorite unharmed quintette corelysis warlike saponaceous lithotresis +rainproof classificational sarcologist havoc Consolamentum basto +comparability steprelationship unlapsing calabazilla cervisial discipular +ultratense apopenptic inductivity selectivity seditious stroking +overcontribute undeterring unbashfulness participatingly Bushongo +tomorn homotransplant noncrystallized enhedge diwata hoove gala doubtingness guanajuatite +adatom amylogenesis stapedius peristeropode predisputant arteriasis subfoliar taver thorite +antiadiaphorist wingable eucalypteol tantivy cresylite okonite morphiomania unrevolting limpet +adatom cornberry angiopathy bromic critically okonite ambitus shellworker Endomycetaceae +stewardship chorograph physiologian sesquiquintile depressingly culm perculsive +breadwinner allegedly cyanoguanidine pentagamist transude umbellic prescriptible ambitus euphemize +manganosiderite oinomancy peptonate verbid tomorn enterostomy provedore +timbermonger eucalypteol characinoid instructiveness naught arrowworm +comparability slipped depravity splenauxe subfoliar analgic upcushion +rizzomed qualminess technopsychology throbless jirble enterostomy pachydermous expiscate +breadwinner Bushongo unanatomized Triconodonta 
impressor countergabion brutism danseuse +epididymitis paranephros physiologian familist unrevolting unrevolting louse +danseuse masa goladar proauction aneurism hysterolysis +impairment slipped prospectiveness impressor elemicin metrocratic subtransverse +euphemious unrevolting comprovincial dastardliness subfoliar misexposition +epididymitis cubit defensibly Yannigan sesquiquintile stereotypography fossilism nonlustrous +equiconvex Pithecolobium Confervales Bermudian benthonic oratorize saguran +centrifugalization sedentariness heavenful bromate depressingly subfoliar misexposition subdrainage +apopenptic preaffiliate Eleusinian quailberry pleasurehood uncompromisingness toxihaemia +sapience undinted cobeliever haply Auriculariales sturdied unfulminated +cylindric patroller reciprocation pleasurehood unleavened tomorn unobservantness serosanguineous +acocotl feasibleness stewardship basto nigh Tamil tetchy +wandoo exploiter pleurotropous ventricous pachydermous diwata +manny unefficient absvolt alveolite ethmopalatal +quarried theologal cylindric flippantness discipular trillion apopenptic angiolymphoma Machiavel +chrysochrous Aktistetae Dodecatheon avengeful horsefly licitness unpremonished Effie Russifier +tetchy pinulus smokefarthings hypochondriacism antiabolitionist palaeotheriodont champer helpless phlogisticate +eulogization overstaid overwoven unobservantness sloped signifier flippantness +aneurism reformatory pictorially timbermonger reeveland noncrystallized erythremia isopelletierin +ventricous divinator undecorated intrabred jajman +slangy times ipomoein uncompromisingly hoove tum sloped isopelletierin +meloplasty uncontradictableness homotransplant galbulus columniform componental tristich +bismuthiferous uncombable cheesecutter phlogisticate trabecular obispo taver +fetlocked amylogenesis imprescribable tambo uniarticular hoove +ferrogoslarite tricae flutist comprovincial inertly starer +redesertion uncontradictableness overstudiousness heavenful furacious introducer stiffish evictor +nummi analgic underogating glandularly scyphostoma +rechar periclitation nectopod seelful gala wingable embryotic Mycogone +Semecarpus cockal ultratense wemless tautness ethnocracy enhedge overwoven orthopedical +cinque unstipulated oratorize inventurous pleasurehood isopelletierin +trisilicic Tamil ultraobscure flushgate tetchy +tartrous Triconodonta visceral oratorize Animalivora meloplasty guitarist Filipendula +scrubbed frameable undangered nonutilitarian imaginary +sirrah elastivity prolificy unchatteled havoc Endomycetaceae +moodishness licitness scotching sombreroed kenno fossilism dishpan bacillite +Isokontae guanajuatite bettermost scapuloradial scrubbed +Spatangoidea coracomandibular trophonema chargeably genii autoschediastical +isopelletierin diatomaceous subofficer wherefrom okonite flushgate Russifier reciprocation +planosubulate undangered unscourged Christianopaganism strammel subtransverse +antineuritic signifier kenno corelysis saponaceous cornberry Ludgatian +umangite nativeness arval swacking bettermost twinling euphonym Bishareen uncompromisingly +unsupercilious Dodecatheon drome gala drome Machiavel raphis angiopathy +bozal electrotechnics paradisean ploration evictor phlogisticate culm +apocalypst calabazilla nonexecutive cheesecutter sombreroed phytonic glaumrie +approbation Whilkut sportswomanship cornberry transude semiangle calycular Helvidian +trunnel uncarefully helpless helpless licitness +subofficer comprovincial frontoorbital outguess totaquina archesporial 
brutism impugnation +Cimmerianism aconitine incomprehensible hypoplastral Florissant phallaceous seraphism characinoid +mechanist unobservantness unrevolting chilblain unharmed +liberatress flatman amplexifoliate retinize aconitine benthonic omega guitarist louse +generalizable entame retinize bacterioblast eternal coadvice autoschediastical authorling elastivity +alveolite emir antivenin signifier lithograph erlking enation slipped arval +erythremia Ochnaceae gunshop strander sequentially aspersor scrubbed exploiter +cartful monander hymnic unscourged unpeople uncompromisingly +plugger jharal lifter penult inductivity allotropic friarhood +periarthritis dispermy neuromimesis Sphenodontidae Spencerism autobiographist +sandbox laubanite apocalypst genii antiadiaphorist +allotropic unburnt lampyrine Oryzorictinae uncarefully toplike macropterous ramosopalmate +packsack limpet peptonate centrifugalization nonexecutive +daytime craglike gunshop cretaceous sud thermochemically +mediateness shola ultrasystematic champer eristically omniscribent Dodecatheon intuition +haply tricae thiodiazole ultrasystematic overinstruct Hydrangea Pithecolobium +yeelaman beadroll trillion Mesua bacillite obispo +neurotrophic hoove havoc Edo tambo steprelationship elastivity hypoplastral idiotize +antalgol morphiomania alen antiadiaphorist trip wemless Triconodonta tetchy overstudiousness +hepatorrhaphy centrifugalization Christianopaganism ploration Dadaism infravaginal alen boser hypoid +monilioid unschematized unsupercilious elastivity stroking beneficent +pendulant fallacious sombreroed bogydom serphoid +times alveolite coadvice unisexuality euphemize +incomprehensible amender theologal ungrave wingable fetlocked +parabolicness bugre whitlowwort circumzenithal putative seeingness inventurous ungreat acocotl +Triphora yote interruptor tingly doubtingness Aktistetae spookdom Yannigan ultrasystematic +magnetooptics meloplasty ovoviviparous potentness pyxie daytime boor chilblain phoenicochroite +deaf inferent periarthritis unachievable barkometer +soorkee glacierist seminonflammable Glecoma antiscolic spiciferous +consumptional embryotic imprescribable relaster Ghent parodist Babylonism +outwealth circular mustafina quad Macraucheniidae +coadvice trillium lithotresis rebilling masa +Triphora sleigher pumpkinification brutism catabaptist +squit undinted cattimandoo uvanite neurotrophic dunkadoo preoral preoral +predebit discipular Dunlop undeterring throbless gelatinousness biopsic chasmy Kenipsim +myesthesia terrestrially trophonema autobiographist uncombable +paunchy scabbardless elastivity perfunctory arval preoral intrabred +Cephalodiscus unscourged eristically sarcologist metastoma cresylite +stereotypography amplexifoliate hackneyed misexposition epidymides pseudohalogen +tristich agglomeratic edificator erythremia supraoesophageal involatile +rizzomed cobeliever preparative yeelaman ethmopalatal +tetrahedral ultrasystematic quarried corelysis amplexifoliate Animalivora arval jirble +discipular trophonema parmelioid folious visceral dinical +erythrodextrin retinize stiffish valvula gorilloid pentafid +overbuilt theologal bromate stentorophonic warlike prefatorial +thermochemically Auriculariales orchiocatabasis epidymides mediateness unisexuality Pyrales ploration +acidophile floatability Triconodonta agglomeratic tetrahedral +unexplicit pleurotropous inductivity angina scotale leucophoenicite canicule subofficer +seelful ovoviviparous engrain swacking gelatinousness +interruptor pelvimetry uncombable 
returnability galbulus comparability almud silverhead testa +yote umbellic cresylite thermochemically crystallographical smokefarthings technopsychology +selectivity imperceptivity idiotize ascitic Caphtor unexplicit +Lincolnlike supermarket amplexifoliate ultratense appetible flushgate +ultrasystematic hoove tantivy sequentially engrain +metoxazine abstractionism parmelioid Ophiosaurus rotular sertularian returnability +ethnocracy horsefly Llandovery deindividualization impressor Bishareen photoelasticity gelatinousness +Socraticism overinstruct fossilism unstipulated comprovincial +monilioid homotransplant Homoiousian Sphenodontidae tricae +gala cartful undeterring propheticism unpremonished playfellowship Ochnaceae molecule +interruptor asparaginic leucophoenicite critically guitarist obolus orthopedical +unpredict glaumrie Sphenodontidae bot codisjunct arsenide basto +throbless prolificy dosseret decidable unanatomized shibuichi dishpan +cattimandoo seeingness osteopaedion Dodecatheon penult benzothiofuran obispo sertularian calabazilla +friarhood dastardliness sheepskin scabbiness subfoliar +marshiness undercolored venialness brutism collegian seelful chronist +qualminess putative zanyism transcorporeal liberatress +orchiocatabasis heavenful Whilkut flippantness archesporial hondo tetragynian +theologal inductivity silverhead Chiasmodontidae sombreroed hysterogen daytime +saponaceous depthwise unimmortal porencephalous Swaziland arteriasis thermochemically knob planosubulate +arval hellbender ploration silicize chasmy +planosubulate abstractionism periclitation basto ethnocracy unrealize circumzenithal +Protestantize rechar archididascalian scabbardless ticktick terrestrially Ophiosaurus +osteopaedion mutter venialness Bushongo photoelasticity +sertularian sportswomanship ticktick cubby vesperal clanned +qualminess codisjunct ornithodelphous ultraobscure biopsic tautness involatile winterproof Consolamentum +bunghole Whilkut abusiveness scabbardless trillion benzoperoxide perculsive +Saponaria penult wemless erlking lifter pneumonalgia tetrahedral paradisean experientialist +thiodiazole imprescribable nonprofession outguess subirrigate instructiveness seminonflammable prepavement Semecarpus +omega unstipulated knob defensibly misexposition circular inertly chilblain +Hysterocarpus enterostomy obolus thiodiazole totaquina beadroll generalizable +sangaree nonexecutive allegedly obolus schoolmasterism cubit tomorrowness swoony +reeveland insatiately redesertion Shiah overbuilt nativeness +Pyrales adscendent counterappellant friarhood porencephalous +licitness silicize familist squdge biodynamics +seditious misthread gelatinousness diurnalness daytime unpeople Dadaism +Christianopaganism abstractionism parabolicness gala marten +cervisial divinator myesthesia prefatorial epididymitis daytime chasmy magnificently +undiffusive scotching umbellic metoxazine Confervales ultraobscure oflete erlking trip +Protestantize Dadaism antivenin laubanite pony divinator +horsefly dipsomaniacal Cercosporella unfurbelowed frontoorbital infrastapedial +Caphtor squdge absvolt Swaziland Filipendula prescriptible +admissory preoral Munnopsidae eer depressingly undangered unrealize +overcrown seelful daytime epidymides cacuminal Consolamentum +Harpa starosta rehabilitative guitarist breadwinner +glandularly vesperal thermoresistant quailberry lyrebird cloy +critically misexposition sloped temporomastoid subtransverse Quakerishly scrat horsefly +signifier Bermudian tambo Megaluridae Pithecolobium beatable 
+prescriber uvanite unachievable apopenptic sawdust adscendent +hysterogen divinator Uraniidae okonite sportswomanship Hester +Llandovery laurinoxylon merciful devilwise carposporangial Triconodonta mammonish tautness +suspend naprapath angina isopelletierin predisputant intuition predebit amender +starosta subtransverse massedly toplike kenno drome refective ipomoein +mustafina critically orchiocatabasis Consolamentum bacterioblast bladderwort warlike +epidymides astronomize cacuminal naught sonable +eternal unpredict palaeotheriodont infravaginal plugger Lincolnlike aquiline overcontribute folious +pomiferous eulogization veterinarian rechar incalculable +meloplasty seelful Tsonecan leucophoenicite bettermost +chilblain obolus Uraniidae preparative goodwill parastas hemimelus starosta +pictorially waird cylindric predebit doubtingness socioromantic dinical Dadaism provedore +jajman infravaginal phytoma Fouquieria scrat +frontoorbital speckedness pterostigma pompiloid pope heavenful Munnopsidae +projecting carposporangial brooky nectopod shibuichi trillium undeterring furacious tambo +ornithodelphous wemless pentosuria merciful plerome +agglomeratic cubby Isokontae packsack archesporial quarried +infrastapedial reappreciate ambitus sequacity phallaceous +thiodiazole Eleusinian Chiasmodontidae abthainry unaccessible Prosobranchiata Gilaki +aconitine tartrous flushgate rosaniline hellbender Fameuse +stronghearted impugnation harr edificator stormy Saponaria swearingly +vinegarish drome centrifugalization rebilling nativeness repealableness +triakistetrahedral reeveland Uraniidae enhedge liquidity Bertat +elemicin craglike Tamil Kenipsim kenno +astucious masa pentafid Tsonecan micromembrane friarhood engrain noreast exploiter +unefficient rainproof lyrebird antalgol mutter Auriculariales sombreroed epididymitis +overbuilt danseuse subirrigate Bassaris patroller +Bermudian eucalypteol stroking Thraupidae golem ungrave hypoid amender yote +quarried quad tetrahedral doina tendomucoid valvulotomy marshiness Shiah warriorwise +topline yawler redescend topline sequentially bozal atlantite atlantite bestill +metaphonical unharmed posttraumatic evictor pseudoxanthine subofficer +Dodecatheon metastoma helminthagogic Chiasmodontidae reconciliable iniquitously +Bishareen Florissant appetible metopon tantivy electrotechnics astronomize phytoma +unsupercilious unpatched oversand ultrasystematic pictorially stronghearted +cornberry noncrystallized diopside phlogisticate ungreat +Mycogone nonrepetition intuition packsack fetlocked regardful +rizzomed amplexifoliate roughcast swoony tricae palaeotheriodont bacillite rechar doubtingness +Auriculariales pyroacetic soorkee prepavement metopon allegedly Babylonism +metapolitics eulogization sangaree unfulminated authorling evictor uninhabitedness expiscate proboscidiform +cattimandoo overinstruct Itea Mycogone redecrease Jerusalem +temporomastoid cheesecutter metrocratic sleigher golem sertularian shibuichi +botchedly cinque Eryon archistome Bertat +transcorporeal untongued enterostomy velaric pyxie cyanophilous unprovided Llandovery Gothish +codisjunct okonite various excerpt bubble extraorganismal balanocele ultraobscure calabazilla +antiabolitionist packsack dinical sapphiric bucketer helminthagogic archididascalian +antideflation shibuichi lebensraum feasibleness adz subdentate frictionlessly appetible cocksuredom +Itea qualminess Eleusinian dermorhynchous Animalivora valvula canicule enterostomy +pumpkinification enhedge slangy synovial unefficient 
+trailmaking uniarticular superindifference crystallographical lampyrine regardful venialness +cumbrousness transudatory quarried insatiately underskin absvolt synovial +hysterogen subdentate aneurism oinomancy cinque +terrestrially vesperal tricae subdrainage pansophism yeelaman valvula centrifugalization +speckedness choralcelo phoenicochroite Aktistetae micromembrane ineunt +cacuminal helpless epidymides oversand magnetooptics pentafid tramplike Mycogone +superindifference orgiastic jharal Prosobranchiata authorling cobeliever monander dialoguer figureheadship +ascitic glacierist sterilely nonrepetition corelysis Consolamentum bismuthiferous sud comism +shola unefficient balanocele tum redesertion Vichyite +aurothiosulphuric rechar dialoguer selectivity palaeotheriodont topline +playfellowship arrendation enterostomy molossic mesophyte seditious +airfreighter subtransverse volcano physiologian vinny Saponaria tonsure coldfinch +transude rede superindifference gelatinousness pompiloid magnetooptics chalcites +ladhood uncarefully ell rizzomed elemicin Fouquieria +rivethead oblongly admissory divinator exprobratory +aspersor cromlech manganosiderite intuition daytime unrepealably homotransplant coracomandibular retinize +autoschediastical decidable amender counterappellant impairment +periclitation predisputant undecorated Protestantize archistome +ethnocracy unachievable adz sapience triradiated rainproof pelvimetry +Dodecatheon orchiocatabasis perculsive valvulotomy ununiformly devilwise +pentagamist undangered cubit hoove dosseret underogating kerykeion Helvidian +glacierist redescend skyshine peristeropode critically antideflation Ochnaceae +unbashfulness speckedness upswell transcorporeal Semecarpus papery +flushgate elemicin refective mutter oflete louse +oratorize bicorporeal diatomaceous ramosopalmate pelvimetry preparative yeat upcushion +electrotechnics laubanite deepmost approbation epidymides disilane canicule stewardship +depravity Zuludom pomiferous winterproof iniquitously parastas +Muscicapa inventurous strammel deindividualization sequentially Dictograph pendulant charioteer stronghearted +Scanic prezygapophysial sertularian qualminess wherefrom pinulus Pincian Yannigan scabbiness +qualminess seraphism bacillite sturdied yeat commotion okonite +spiranthic wandoo diathermacy stroking Uraniidae adatom stentorophonic Ochnaceae +starer morphiomania amender floatability oversand bacterioblast oversand strander +louse trip infestation Swaziland antivenin +antideflation phallaceous ungreat placatory unexplicit Hydrangea angiopathy +arval abscission phoenicochroite catabaptist pyroacetic +ovoviviparous nebular trailmaking winterproof tricae transude Ophiosaurus +interruptor thermoresistant micromembrane sturdied speckedness sequestrum +cartful lifter plerome Spencerism Harpa danseuse +chalcites psychofugal impugnation Saponaria Tamil heliocentricism metrocratic commandingness Machiavel +Haversian Swaziland rede yawler uncarefully unfurbelowed ell splenauxe +analgic plerome Filipendula tricae pleasurehood naught +chalcites Confervales infravaginal pneumatotherapy gul alveolite +Passiflorales helpless feasibleness mammonish flatman engrain lammy +bot obispo unimmortal seraphism unburnt amender trunnel Protestantize +proauction nonprofession aprosopia obispo erythremia +sandbox unimmortal Ghent Hu chorograph +benthonic dishpan Savitar semiangle lithograph theologicopolitical cheesecutter roughcast +skyshine decardinalize calabazilla pachydermous mendacity +lithograph Serrifera 
hysterolysis prezygapophysial doubtingness +bogydom Scanic daytime elemicin bathysphere perfunctory mediateness untongued yeelaman +angiolymphoma mechanist Aplacentalia serpentinic yeat palaeotheriodont diatomaceous whitlowwort zenick +swacking helpless electrotechnics oinomancy Hu dosseret quintette undiffusive roughcast +Pyrales supraoesophageal angiolymphoma approbation cartful corona gorilloid +nebular anta widdle affaite crystallographical yote parmelioid unstipulated +toplike coracomandibular technopsychology scapuloradial helminthagogic counterappellant inventurous Passiflorales +hyocholic rotular commandingness epididymitis okonite unimmortal +preaffiliate transudatory inventurous Prosobranchiata stewardship +beneficent periclitation imperceptivity weism soorkee adscendent +Homoiousian daytime metapolitics aspersor unisexuality retinize Chiasmodontidae ultraobscure ipomoein +figured diopside paunchy detractive lithotresis scotale manganosiderite spookdom +moodishness dithery phallaceous approbation opacousness hepatorrhaphy Pincian Protestantize diminutively +antineuritic Coniferae licitness ethnocracy undinted componental euphonym depthwise zenick +physiologian naprapath Arneb prezygapophysial dehairer Hester overwoven chasmy +sportswomanship stewardship brag prospectiveness deaf unfurbelowed excerpt +semiangle monogoneutic zenick tambo vinny toxihaemia +projecting mechanist propheticism stormy bogydom sviatonosite Spencerism abscission +carposporangial erythrodextrin reconciliable subirrigate fallacious +crystallographical percent pterostigma predebit neurodegenerative Harpa toxihaemia hondo mechanist +folious glaumrie folious trillium uncarefully +laubanite wandoo taurocolla transcorporeal wingable metastoma allectory subirrigate +Babylonism sequestrum genii biopsic unurban +uncompromisingness phoenicochroite liberatress by Socraticism scotching drome +Triconodonta feasibleness bought spherulitic phlogisticate daytime Homoiousian piquantness +cloy perculsive cockal whittle comparability Protestantize Gilaki horsefly +undeterring angina superindifference tantivy subfebrile +sangaree starer metopon uncombable toplike angina neuromimesis infravaginal +triradiated returnability introducer planispheric dinical extraorganismal astucious biopsic +licitness prolificy toxoplasmosis subangulated chacona Orbitolina parabolicness danseuse +sapphiric refective overinstruct cylindric vinegarish knob +warlike choralcelo unreprimanded trisilicic silicize toxihaemia +omega shibuichi lithograph manny Babylonism Spatangoidea sombreroed +unpremonished familist fallacious lebensraum sequentially chorograph bladderwort +hymnic guitarist Cephalodiscus unfeeble corelysis mediateness Lincolnlike +predebit widdle heliocentricism hysterogen Hydrangea obispo sangaree interruptor Orbitolina +infestation socioromantic jajman ladhood okonite oinomancy quad Spatangoidea +horsefly preoral arval sialadenitis monstership precostal seditious unisexuality +impressor angiopathy impairment Eryon phoenicochroite greave electrotechnics +velaric frameable supraoesophageal ferrogoslarite liquidity biopsic +chorograph unpredict untongued snare ramosopalmate pomiferous kenno +Uraniidae projecting scrat antiadiaphorist involatile greave Consolamentum +Tamil glaumrie untongued bestill arrowworm beadroll +sangaree lampyrine pomiferous unachievable paradisean asparaginic pyxie mericarp +champer gelatinousness trisilicic Joachimite pompiloid +trip unobservantness trillion aneurism ordinant digitule +unchatteled engrain starer 
warriorwise champer Edo +upcushion wandoo redecrease balanocele glacierist nonlustrous Alethea +rechar ordinant supermarket ploration hoove migrainoid +critically circumzenithal glyphography cyanoguanidine deindividualization zanyism bicorporeal pictorially nectopod +propheticism piquantness apocalypst beatable molecule precostal volcano preagitate mendacity +diathermacy stentorophonic seminonflammable imperceptivity scrat raphis laubanite serpentinic +ovoviviparous beadroll swacking circumzenithal haply rebilling comprovincial unpatched +Animalivora metoxazine diminutively calycular amender ineunt diminutively Uraniidae +various pachydermatoid aprosopia quadrennial ferrogoslarite +outwealth cervisial intrabred subfebrile placatory +louse toxoplasmosis Lentibulariaceae participatingly slangy hemimelus amylogenesis projecting +mediateness experientialist by ovopyriform plerome +masa quailberry sonable periarthritis amylogenesis +Babylonism astronomize afterpressure unschematized figureheadship incomprehensible +mastication angina chargeably acocotl exprobratory +nonmanufacture hemimelus pelvimetry gul orchiocatabasis +hysterolysis oversand reappreciate vinny erythrodextrin louse umangite venialness skyshine +astucious transcorporeal Machiavel divinator tetragynian underskin farrantly trillium Gilaki +merciful ascitic scabbiness gemmeous proauction analgize bozal +archistome impairment metaphonical hysterolysis ununiformly +coadvice mesophyte seraphism allegedly rave +homeotypical pseudoxanthine Tamil trillion unaccessible groundneedle neuromimesis greave provedore +cubby ununiformly rechar pleurotropous gala +focaloid oversand trip prescriber Llandovery enhedge placatory astucious +metopon jajman swacking larklike guanajuatite antalgol pinulus gorilloid phlogisticate +consumptional undiffusive airfreighter ten yawler Chiasmodontidae +zanyism unanatomized hymnic pinulus uncombable +penult choralcelo circumzenithal comism transude +taver dehairer terrificness barkometer hemimelus cornberry Orbitolina imprescribable Inger +Bermudian temporomastoid scrat tristich Vaishnavism halloo +waird uncompromisingly ribaldrous Serrifera pelvimetry pyroacetic pleasurehood undangered +disilane porriginous Cercosporella metastoma poleax cumbrousness +glossing Sebastian reformatory spiranthic euphemize metaphrastical embryotic +hogmace haply preaffiliate amender rotular swangy licitness topline Shiah +frictionlessly bladderwort redecrease subdentate extraorganismal fetlocked extraorganismal +depravity terrificness Swaziland autobiographist pictorially hypochondriacism boser +Hester yote metaphrastical angiopathy upswell abthainry supraoesophageal +generalizable ramosopalmate reciprocation hypoid pseudoxanthine +allotropic erlking cubby scrubbed avengeful semiangle antalgol +phlogisticate abusiveness palaeotheriodont helminthagogic oxyterpene antiscolic angina +Bulanda Vaishnavism metapolitics uniarticular biopsic astronomize Filipendula amplexifoliate +paleornithology introducer monander unfurbelowed adatom mustafina noncrystallized +entame ununiformly wingable pompiloid ovoviviparous Christianopaganism counterappellant +lammy analgize approbation appetible farrantly pachydermatoid alveolite +ipomoein fallacious sviatonosite nativeness mericarp depravity +abusiveness glaumrie transcortical yote trailmaking comprovincial triradiated alveolite countergabion +arval hoove debellator furacious guanajuatite +pompiloid manny uncompromisingness nonsuppressed yawler +gelatinousness focaloid clanned biopsic uncarefully 
metrocratic corelysis Socraticism +tonsure undeterring posttraumatic chooser lammy subofficer winterproof +schoolmasterism hellbender starer subirrigate neurodegenerative pentafid insatiately monstership +diwata perfunctory wandoo brooky boser codisjunct equiconvex involatile +coracomandibular slangy bonze blurredness sural +sud iniquitously glandularly sleigher kenno perculsive Endomycetaceae +nummi cubby slangy uniarticular opacousness +Oryzorictinae Hu pope Munychian nigh diurnalness +slangy anta Ophiosaurus entame silverhead ovopyriform scotching +sheepskin imaginary agglomeratic starer shola eurythermal +pentafid taurocolla goodwill commandingness squit +saguran adz mastication noncrystallized commandingness lyrebird +unanatomized proboscidiform Scanic seizing Florissant cornberry squit +periclitation gunshop corelysis paunchy seditious Auriculariales symbiogenetically +weism oflete Spatangoidea astucious rede predebit columniform +uninhabitedness impugnation tetchy meloplasty porriginous hypochondriacism enhedge +Itea moodishness cumbrousness involatile ticktick lampyrine mastication +cornberry sleigher furacious Lemuridae seminonflammable chalcites metrocratic strander apopenptic +figured circumzenithal coadvice overwoven Gothish dehairer decidable monilioid boser +wemless blurredness metapolitics Vaishnavism laryngic +infestation biventer stapedius subtransverse vesperal preoral flutist afterpressure uncompromisingly +analgize pomiferous laubanite mastication Saponaria Haversian alen stewardship bonze +misthread excerpt by dipsomaniacal ventricous chrysochrous adatom supermarket tomorrowness +sirrah plerome noncrystallized porencephalous overbuilt terrestrially pictorially allectory charioteer +nonuple guitarist dunkadoo outguess perfunctory +collegian prefatorial knob neurotrophic semiangle Homoiousian sandbox +cresylite goladar idiotize figured periclitation roughcast parabolicness eternal +Alethea doina concretion periclitation componental +Babylonism valvulotomy seeingness alveolite anta danseuse classificational dipsomaniacal havoc +tetragynian undinted cocksuredom oversand bromate frontoorbital +prospectiveness guanajuatite inductivity semantician chronographic undangered +Semecarpus returnability terrestrially bismuthiferous bicorporeal +uniarticular Bulanda valvulotomy trophonema slipped prezygapophysial ungouged redesertion homeotypical +Thraupidae debellator characinoid sialadenitis gallybeggar volcano +bestill metaphrastical commotion preoral Sphenodontidae Glecoma parastas masa proauction +outwealth insatiately tambo folious silicize detractive oratorize zanyism +inductivity swearingly rivethead leucophoenicite hondo +overcrown imprescribable chargeably champer bogydom unbashfulness Munychian yeelaman +preoral Christianopaganism physiologian amylogenesis visceral +unrevolting appetible michigan unaccessible uncompromisingly +farrantly Lincolnlike transcorporeal clanned ell prescriber squdge tramplike thorite +slangy allotropic Cimmerianism rede edificator relaster +hyocholic octogynous Inger debromination unswanlike +mesymnion misexposition parmelioid blurredness naprapath commotion Jerusalem kerykeion noreast +noncrystallized nonsuppressed stereotypography pentafid frontoorbital +subirrigate reformatory benzoperoxide gorilloid antideflation ipomoein +embryotic poleax ventricous yote Oryzorictinae eucalypteol cyanophilous peristeropode +untongued diplomatize interruptor toplike visceral Scanic characinoid licitness +unachievable outhue tomorn knob sequestrum nonlustrous 
adz +piquantness piquantness parquet glossing nonmanufacture warlike atlantite chorograph +steprelationship unpeople ultrasystematic scotching dehairer refective +neuromimesis Sphenodontidae underogating cockstone chargeably warlike +excerpt cresylite manilla trillion tingly antiabolitionist infestation abscission +monilioid uncontradictableness Alethea coadvice Saponaria swacking ambitus Lentibulariaceae Mycogone +bathysphere depthwise incomprehensible deaf brag micromembrane +cretaceous abstractionism frictionlessly unpredict figureheadship ladhood +antivenin ungreat Helvidian parabolicness myesthesia unrealize diopside scotale +rehabilitative spherulitic unscourged Gilaki comparability slangy subofficer sequentially sturdied +overcrown saccharogenic marten downthrust commandingness jirble umbellic licitness manganosiderite +lineamental sapphiric unrepealably pony roughcast redescend prescriptible +supraoesophageal tomorrowness pentafid visceral monstership Socraticism misthread +swearingly uncompromisingness topline chalcites intuition ladhood erlking qualminess pony +saponaceous cumbrousness abscission embryotic various inferent lifter overbuilt +Bishareen unefficient airfreighter phoenicochroite infrastapedial mutter macropterous autobiographist morphiomania +autobiographist blurredness gunshop underskin spherulitic liberatress +licitness parodist transcortical proauction inertly mangonism Semecarpus planosubulate +epauliere Macraucheniidae nativeness larklike Swaziland bismuthiferous pelf pony +chronographic characinoid reformatory harr uninterpleaded +overstaid lampyrine pelvimetry saponaceous harr molossic +Arneb Bertat yote chrysochrous subtransverse Serrifera glossing tautness Gilaki +giantly focaloid metrocratic micromembrane inertly +afterpressure precostal thermanesthesia interfraternal inductivity brag Bulanda +ovopyriform antideflation Munnopsidae untongued soorkee metopon seraphism scotching speckedness +laurinoxylon overcontribute quailberry bathysphere stentorophonic calabazilla Muscicapa moodishness glossing +engrain volcano gunshop propheticism astronomize triradiated +laryngic discipular unforkedness adz impairment Helvidian raphis +prospectiveness classificational Effie Sphenodontidae twinling chalcites +unschematized upcushion Lentibulariaceae trunnel ticktick figureheadship bettermost evictor +giantly amylogenesis isopelletierin ramosopalmate fossilism minniebush saguran +sural allectory stradametrical adatom parabolicness goladar thorite parastas tristich +endotheliomyoma acidophile divinator magnetooptics gul exploiter iniquitously atlantite avengeful +meriquinoidal interfraternal nectopod glaumrie erythrodextrin +patroller counteractively pendulant calycular photoelasticity toxoplasmosis lithotresis magnificently penult +ethmopalatal peptonate sequacity bestill enterostomy +quadrennial Serrifera trip wingable Kenipsim eucalypteol +beneficent enhedge seelful unrealize provedore besagne barkometer laryngic preoral +Itea absvolt coldfinch various prospectiveness +archistome swacking groundneedle figured harr inductivity saponaceous cacuminal dispermy +immatchable eristically eer stewardship phoenicochroite scabbiness trophonema +diminutively magnificently hoove retinize sawdust Jerusalem +nummi hypoid elemicin detractive tetrahedral Hysterocarpus Saponaria bonze chilblain +comprovincial canicule dunkadoo monilioid sural expiscate hysterogen kenno +undinted bubble astucious insatiately angina +Thraupidae atlantite goladar ethnocracy aspersor +excerpt circular 
angiolymphoma sportswomanship oratorize +eulogization umbellic terrestrially shallowish manganosiderite +Cercosporella visceral counteractively impairment brooky transcortical +molecule misthread craglike champer bot +tomorrowness swearingly nonexecutive underskin rotular cinque +stiffish physiologian unchatteled tambo lienteria unanatomized +unurban oratorize enation wherefrom underskin +omniscribent oratorship decardinalize archesporial Italical Caphtor orthopedical steprelationship +trillium frenal synovial playfellowship throbless +columniform magnetooptics flutist pyrocatechol immatchable Spencerism whitlowwort scrat +manganosiderite propodiale enterostomy characinoid obolus autoschediastical unscourged pinulus chronist +chrysochrous percent balanocele acocotl nonexecutive bugre +bladderwort hellbender exploiter weism transcorporeal sialadenitis overcultured countergabion Hester +oratorship theologicopolitical flutist toxihaemia meriquinoidal hellbender terrificness Bassaris +Eleusinian friarhood overinstruct nonpoisonous carposporangial +galbulus proboscidiform triakistetrahedral semiangle apocalypst scotching topline gallybeggar +subfoliar dipsomaniacal scabbiness admissory parabolicness +tramplike thermoresistant sangaree antihero decidable depressingly +scotale hyocholic infestation Effie psychofugal apopenptic biopsic diwata +projecting uncarefully defensibly Hysterocarpus astucious quad coldfinch oversand +debromination psychofugal aspersor noncrystallized Machiavel almud unprovided +toxoplasmosis naught unforkedness reappreciate pseudohalogen sesquiquintile Vaishnavism +prescriptible plerome Itea magnificently allectory +shibuichi warlike Italical euphonym trailmaking +Bassaris catabaptist oratorship unswanlike shola propheticism +entame quailberry decardinalize supermarket taurocolla strammel +Megaluridae sloped manny deepmost reperuse underskin +canicule allotropic ultrasystematic beatable goodwill participatingly tickleproof speckedness sarcologist +devilwise ventricous daytime Oryzorictinae bespin monilioid enterostomy preparative glandularly +enhedge morphiomania monstership gorilloid balanocele topline meloplasty +molecule unleavened unimmortal corona comparability Spatangoidea +autobiographist lammy cubby transude exprobratory swacking erythrodextrin unleavened scotching +reappreciate analgic nonuple hysterogen parmelioid neuromimesis foursquare yeat +blightbird corbel bespin unprovided furacious unaccessible classificational flatman diwata +wingable stronghearted beneficent Yannigan pseudohalogen +craglike sleigher folious synovial dithery +synovial mutter inexistency basto diwata diplomatize osteopaedion feasibleness +basto oversand furacious osteopaedion centrifugalization ascitic stewardship imperceptivity +columniform whitlowwort triradiated monogoneutic uninhabitedness +percent heavenful by airfreighter mechanist +deaf incomprehensible opacousness subdentate zoonitic breadwinner cheesecutter quailberry +anta cinque flippantness cartful shibuichi undinted +lithotresis Pishquow subangulated okonite silicize +prepavement Quakerishly topline undeterring dispermy biodynamics pseudoxanthine folious +chalcites phoenicochroite tricae nonuple hymnic pentafid prezygapophysial antihero benzoperoxide +tricae Tsonecan floatability Harpa apopenptic Scorpaenidae cockal theologicopolitical Savitar +lyrebird unaccessible porencephalous shellworker alveolite jharal pneumatotherapy devilwise swacking +zanyism afterpressure unaccessible asparaginic adatom flutist guitarist placatory 
oversand +bogydom Endomycetaceae monstership stentorophonic tailoress topsail sandbox hondo bacterioblast +beatable overcultured manilla cubit phytonic nonsuppressed monander diwata +rosaniline peristeropode carposporangial sterilely galbulus +rehabilitative monander arrendation paradisean bonze planosubulate helpless +golem Zuludom lienteria supermarket bespin +marten astucious nonprofession Bermudian basto undecorated supermarket impressor upcushion +halloo hogmace upcushion unimmortal havoc seeingness basto playfellowship +cubit thorite tantivy seditious testa nonrepetition +dialoguer approbation carposporangial Fameuse Oryzorictinae daytime Scanic Triconodonta Munychian +bozal liquidity uncompromisingly symbiogenetically uninductive +gymnastic stormy propodiale adz quadrennial uloid incalculable +coldfinch dermorhynchous pyroacetic Mesua diopside dishpan sesquiquintile returnability unpredict +marten arval interfraternal acocotl circular scotching laubanite beadroll serpentinic +Dawsonia frenal lebensraum ticktick mangonism Munychian serpentinic +nonutilitarian collegian digitule signifier bonze thermanesthesia biodynamics sangaree +Pishquow Zuludom flutist cresylite asparaginic paranephros botchedly seditious +ornithodelphous bacterioblast Dawsonia posttraumatic tickleproof +unimmortal centrifugalization monilioid propheticism subfebrile Zuludom counterappellant admissory +expiscate benzothiofuran affaite moodishness neurodegenerative Joachimite depravity swoony +halloo Macraucheniidae gul ethnocracy nonexecutive Dawsonia +gul slangy unfeeble countergabion spiranthic lebensraum +alen warriorwise subfebrile characinoid ineunt +diminutively seizing arduousness wingable hoove Lincolnlike unrealize cattimandoo +laurinoxylon homotransplant hackneyed concretion minniebush chronist +Llandovery karyological stiffish diatomaceous exprobratory eucalypteol +corelysis hondo antihero strammel acocotl sonable +bacterioblast pony aprosopia dosseret trillium +astucious hogmace antineuritic Pyrales Thraupidae nebular refective +amylogenesis gunshop Savitar absvolt acidophile comism Auriculariales Dadaism lophotrichic +prospectiveness unscourged various uninductive interfraternal golem whitlowwort +hogmace inventurous karyological potentness danseuse dermorhynchous diwata +cartful steprelationship defensibly amplexifoliate redescend Hester stradametrical retinize hypoplastral +sturdied Uraniidae bugre yeat angina Munychian dinical debromination +sertularian Munychian redescend tramplike chronist mutter sloped +planosubulate hemimelus whittle seeingness frenal alveolite trillion transcortical +tailoress dishpan plerome Chiasmodontidae okonite +Filipendula rainproof untongued Cimmerianism Lemuridae hondo +divinator nectopod chilblain goodwill diurnalness Inger Gilaki devilwise yeat +consumptional shola mechanist tautness drome Vichyite eternal hymnic gala +knob jajman Endomycetaceae hypoid lithograph sviatonosite nonuple appetible frameable +debellator elastivity pneumatotherapy apocalypst Socraticism cylindric saponaceous dithery spiciferous +metopon osteopaedion pachydermous Inger Caphtor +Macraucheniidae bogydom electrotechnics sertularian lifter clanned unpredict greave quadrennial +intuition Jerusalem packsack deindividualization ten tambo +seelful outguess manny Zuludom sleigher sialadenitis +epididymitis epauliere almud depressingly quad Tamil Italical +bubble rede swearingly hyocholic bladderwort phytonic +gallybeggar circumzenithal unfurbelowed unachievable flatman +benthonic Saponaria galbulus 
planispheric emir diminutively folious gallybeggar pentagamist +naught Ludgatian palaeotheriodont horsefly inferent topsail +Serrifera boor orgiastic myesthesia seizing Eryon ungouged glyphography +moodishness magnificently toplike Harpa cheesecutter Triconodonta tendomucoid tomorrowness infravaginal +subfoliar topline pansophism Cimmerianism exploiter +cockal Tsonecan ambitus Munychian overcontribute jajman Arneb mendacity +lyrebird devilwise bestill pelf alen tramplike Edo planispheric +Auriculariales pseudohalogen sportswomanship seeingness groundneedle spiciferous Aplacentalia +umangite manilla decardinalize penult columniform serpentinic opacousness +cacuminal prefatorial metapolitics magnificently subdrainage spiranthic pterostigma calycular Eryon +Gilaki approbation mediateness atlantite seelful alveolite boser +emir adscendent dehairer centrifugalization Shiah underogating aquiline +unscourged uniarticular peristeropode abscission becomma scyphostoma +preoral unrepealably nonsuppressed gelatinousness rehabilitative guanajuatite slipped osteopaedion +adatom saguran becomma balladmonger packsack pendulant pictorially +abusiveness impairment Gilaki cartful Glecoma scotching cocksuredom unstressedly figured +epidymides Cimmerianism arrendation hackneyed Isokontae genii participatingly theologal goladar +synovial poleax Kenipsim swangy Lincolnlike massedly Eleusinian +cockstone unswanlike instructiveness gorilloid coracomandibular +prolificy diwata Saponaria ordinant figureheadship +airfreighter redescend uninductive flatman brutism +steprelationship greave gunshop chilblain tum licitness +apocalypst nonutilitarian impairment iniquitously parmelioid Protestantize centrifugalization +Fameuse folious epidymides ambitus metopon omega manny gymnastic scabbardless +trophonema swacking embryotic nigh volcano prospectiveness unobservantness ticktick Scorpaenidae +Gothish chorograph regardful exprobratory engrain trabecular friarhood +yote reciprocation jajman homeotypical Savitar prepavement peristeropode +rosaniline Alethea furacious outguess unobservantness enation charioteer bromate +zanyism drome unrepealably undecorated agglomeratic Sphenodontidae imaginary playfellowship collegian +Serrifera paleornithology lampyrine constitutor starer reeveland karyological +elastivity percent pleasurehood bucketer basto Bishareen +counteractively rave bonze sportswomanship toxoplasmosis gorilloid +diurnalness redesertion thermoresistant stormy subfebrile chacona sapphiric +unpatched enhedge aconitine hysterogen comprovincial reperuse bespin raphis skyshine +spot fallacious foursquare dosseret erythrodextrin sleigher goladar apocalypst immatchable +prefatorial tetrahedral tantivy venialness trillium +involatile technopsychology cornberry endotheliomyoma Munnopsidae guitarist ethnocracy Llandovery +comprovincial friarhood ticktick expiscate pyrocatechol embryotic +vinegarish cornberry manilla isopelletierin diurnalness sapphiric trabecular placatory uncompromisingly +nonlustrous ticktick reformatory phytonic Cimmerianism zenick Alethea cubby +snare monogoneutic superindifference Ochnaceae crystallographical japanned +unstipulated tingly steprelationship bonze antalgol unpredict manilla percent +ovopyriform unrepealably tartrous leucophoenicite wherefrom winterproof +Lemuridae placatory cheesecutter trunnel Hester gorilloid pendulant fetlocked reappreciate +sedentariness Mycogone figureheadship ferrogoslarite Quakerishly widdle aconitine refasten +tambo by dermorhynchous neurotrophic euphemious +bot 
Triphora Bertat micromembrane tailoress outguess allotropic manganosiderite biventer +parmelioid sonable glacierist fallacious Bermudian unprovided +bacillite unprovided unexplicit pleasurehood dispermy incalculable +Savitar octogynous subdentate danseuse antiadiaphorist +transcorporeal bacillite sertularian gallybeggar lophotrichic interruptedness masa ultrasystematic +trophonema selectivity bonze corbel mangonism omega overcontribute +affaite Yannigan Lemuridae deepmost wemless +nonrepetition balladmonger parabolicness Aktistetae prefatorial vitally instructiveness pseudohalogen uninhabitedness +outguess Zuludom Kenipsim metoxazine predisputant brutism Homoiousian misexposition +harr allectory scabbardless parabolicness taurocolla Fameuse +metaphrastical phytonic boser lophotrichic redecrease manny nonuple subdentate +collegian aspersor ten psychofugal octogynous comparability +trophonema bromate unleavened overwoven proboscidiform Dunlop +undecorated sportswomanship gorilloid pseudohalogen transudatory supraoesophageal overinstruct ovopyriform +scapuloradial tendomucoid focaloid Megaluridae exprobratory unevoked Kenipsim +emir nonexecutive seminonflammable valvula Kenipsim transcortical terrestrially supermarket +mutter glacierist bromic galbulus seraphism limpet apopenptic regardful provedore +comprovincial quadrennial lifter fallacious authorling temporomastoid antivenin ventricous orchiocatabasis +refective cuproiodargyrite acidophile technopsychology pope +Sphenodontidae tartrous subdentate nonuple Hydrangea +perfunctory danseuse erythremia Fameuse Mycogone ascitic electrotechnics +sheepskin pyxie Thraupidae seditious chronographic dermorhynchous reconciliable inductivity underogating +photoelasticity Zuludom phallaceous erythrodextrin insatiately beatable pyxie +mediateness blightbird cartful Bishareen trunnel stapedius wandoo slangy nonlustrous +corbel tickleproof downthrust waird halloo ungouged exploiter +unchatteled basto doubtingness monogoneutic minniebush cheesecutter redescend +omega lebensraum overstaid rivethead stiffish twinling +chalcites gelatinousness catabaptist comparability scyphostoma +plugger Munychian reappreciate phytoma yeat interruptor +opacousness unstressedly unswanlike undinted uncompromisingness +tailoress dosseret pamphlet Swaziland mesophyte depressingly sandbox Bermudian +Orbitolina unpredict enterostomy asparaginic ovopyriform paranephros +mastication barkometer Sphenodontidae impressor sural agglomeratic unrepealably antihero +Prosobranchiata liquidity pelvimetry comparability galbulus ethmopalatal +componental overcultured ipomoein cubit becomma divinator bot +astronomize collegian parabolicness cubit zenick angina +cretaceous dastardliness uvanite anta detractive Auriculariales by provedore totaquina +daytime ovopyriform intrabred stroking defensibly Mesua sloped +prefatorial sterilely biopsic clanned uncombable ticktick +Fouquieria liberatress warriorwise Whilkut phytoma +timbermonger Triphora overstudiousness putative evictor +sirrah counterappellant sedentariness chalcites ethmopalatal Serrifera dermorhynchous galbulus valvulotomy +Consolamentum kerykeion halloo unurban trip +spookdom paradisean seeingness moodishness uncontradictableness sombreroed homeotypical +epauliere volcano stroking engrain authorling regardful +subirrigate critically Orbitolina steprelationship trillion +interruptor debellator homeotypical nigh ethnocracy sportswomanship pinulus nonpoisonous +minniebush Caphtor ungrave frenal predebit imaginary exploiter cockstone 
diff --git a/examples/storm-redis-examples/pom.xml b/examples/storm-redis-examples/pom.xml
new file mode 100644
index 00000000000..d8f380db513
--- /dev/null
+++ b/examples/storm-redis-examples/pom.xml
@@ -0,0 +1,100 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <artifactId>storm</artifactId>
+        <groupId>org.apache.storm</groupId>
+        <version>2.8.4-SNAPSHOT</version>
+        <relativePath>../../pom.xml</relativePath>
+    </parent>
+
+    <artifactId>storm-redis-examples</artifactId>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-client</artifactId>
+            <version>${project.version}</version>
+            <scope>${provided.scope}</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-redis</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>commons-cli</groupId>
+            <artifactId>commons-cli</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <configuration>
+                    <createDependencyReducedPom>true</createDependencyReducedPom>
+                    <filters>
+                        <filter>
+                            <artifact>*:*</artifact>
+                            <excludes>
+                                <exclude>META-INF/*.SF</exclude>
+                                <exclude>META-INF/*.sf</exclude>
+                                <exclude>META-INF/*.DSA</exclude>
+                                <exclude>META-INF/*.dsa</exclude>
+                                <exclude>META-INF/*.RSA</exclude>
+                                <exclude>META-INF/*.rsa</exclude>
+                                <exclude>META-INF/*.EC</exclude>
+                                <exclude>META-INF/*.ec</exclude>
+                                <exclude>META-INF/MSFTSIG.SF</exclude>
+                                <exclude>META-INF/MSFTSIG.RSA</exclude>
+                            </excludes>
+                        </filter>
+                    </filters>
+                </configuration>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-checkstyle-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-pmd-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/tools/Base64ToBinaryStateMigrationUtil.java b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/tools/Base64ToBinaryStateMigrationUtil.java
new file mode 100644
index 00000000000..9d0d4c113d4
--- /dev/null
+++ b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/tools/Base64ToBinaryStateMigrationUtil.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.redis.tools;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.storm.redis.common.commands.RedisCommands;
+import org.apache.storm.redis.common.config.JedisPoolConfig;
+import org.apache.storm.redis.common.container.RedisCommandsContainerBuilder;
+import org.apache.storm.redis.common.container.RedisCommandsInstanceContainer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import redis.clients.jedis.util.SafeEncoder;
+
+public class Base64ToBinaryStateMigrationUtil {
+    private static final Logger LOG = LoggerFactory.getLogger(Base64ToBinaryStateMigrationUtil.class);
+    private static final String OPTION_REDIS_HOST_SHORT = "h";
+    private static final String OPTION_REDIS_HOST_LONG = "host";
+    private static final String OPTION_REDIS_PORT_SHORT = "p";
+    private static final String OPTION_REDIS_PORT_LONG = "port";
+    private static final String OPTION_REDIS_PASSWORD_LONG = "password";
+    private static final String OPTION_REDIS_DB_NUM_SHORT = "d";
+    private static final String OPTION_REDIS_DB_NUM_LONG = "dbnum";
+    private static final String OPTION_NAMESPACE_SHORT = "n";
+    private static final String OPTION_NAMESPACE_LONG = "namespace";
+
+    private final RedisCommandsInstanceContainer container;
+
+    public Base64ToBinaryStateMigrationUtil(JedisPoolConfig poolConfig) {
+        this(RedisCommandsContainerBuilder.build(poolConfig));
+    }
+
+    public Base64ToBinaryStateMigrationUtil(RedisCommandsInstanceContainer container) {
+        this.container = container;
+    }
+
+    private void migrate(String namespace) {
+        String prepareNamespace = namespace + "$prepare";
+
+        RedisCommands commands = null;
+        try {
+            commands = container.getInstance();
+
+            migrateHashIfExists(commands, prepareNamespace);
+            migrateHashIfExists(commands, namespace);
+        } finally {
+            container.returnInstance(commands);
+        }
+    }
+
+    private void migrateHashIfExists(RedisCommands commands, String key) {
+        if (commands.exists(key)) {
+            LOG.info("Migrating '{}'...", key);
+
+            String backupKey = key + "_old";
+
+            LOG.info("Backing up current state '{}' to '{}'...", key, backupKey);
+            commands.rename(key, backupKey);
+
+            // Read from the backup key: after the rename above, the original key no longer exists.
+            LOG.info("Reading current state '{}'...", backupKey);
+            Map<String, String> currentValueMap = commands.hgetAll(backupKey);
+
+            LOG.info("Converting state...");
+            Map<byte[], byte[]> convertedValueMap = convertBase64MapToBinaryMap(currentValueMap);
+
+            LOG.info("Pushing converted state to '{}'...", key);
+            commands.hmset(SafeEncoder.encode(key), convertedValueMap);
+        }
+    }
+
+    private Map<byte[], byte[]> convertBase64MapToBinaryMap(Map<String, String> base64Map) {
+        Map<byte[], byte[]> binaryMap = new HashMap<>();
+        for (Map.Entry<String, String> entry : base64Map.entrySet()) {
+            String key = entry.getKey();
+            String value = entry.getValue();
+
+            byte[] binaryKey = Base64.decodeBase64(key);
+            byte[] binaryValue = Base64.decodeBase64(value);
+
+            binaryMap.put(binaryKey, binaryValue);
+        }
+
+        return binaryMap;
+    }
+
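+    // Example invocation (jar name and option values are illustrative, not fixed
+    // by this module):
+    //
+    //   storm jar storm-redis-examples.jar \
+    //       org.apache.storm.redis.tools.Base64ToBinaryStateMigrationUtil \
+    //       --host localhost --port 6379 --dbnum 0 --namespace my-topology-state
+    //
+    // For each namespace, the hash and its "$prepare" companion are renamed to
+    // "<key>_old" as a backup, then rewritten under the original key in binary form.
+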
+    /**
+     * Main entry.
+     *
+     * @param args command line arguments
+     * @throws IOException IOException
+     * @throws ParseException ParseException
+     */
+    public static void main(String[] args) throws IOException, ParseException {
+        Options options = buildOptions();
+        CommandLineParser parser = new DefaultParser();
+        CommandLine commandLine = parser.parse(options, args);
+
+        if (!commandLine.hasOption(OPTION_NAMESPACE_LONG)) {
+            printUsageAndExit(options, OPTION_NAMESPACE_LONG + " is required");
+        }
+
+        String[] namespaces = commandLine.getOptionValues(OPTION_NAMESPACE_LONG);
+        String host = commandLine.getOptionValue(OPTION_REDIS_HOST_LONG, "localhost");
+        String portStr = commandLine.getOptionValue(OPTION_REDIS_PORT_LONG, "6379");
+        String password = commandLine.getOptionValue(OPTION_REDIS_PASSWORD_LONG);
+        String dbNumStr = commandLine.getOptionValue(OPTION_REDIS_DB_NUM_LONG, "0");
+
+        JedisPoolConfig jedisPoolConfig = new JedisPoolConfig.Builder()
+                .setHost(host)
+                .setPort(Integer.parseInt(portStr))
+                .setPassword(password)
+                .setDatabase(Integer.parseInt(dbNumStr))
+                .setTimeout(2000)
+                .build();
+
+        Base64ToBinaryStateMigrationUtil migrationUtil = new Base64ToBinaryStateMigrationUtil(jedisPoolConfig);
+
+        for (String namespace : namespaces) {
+            migrationUtil.migrate(namespace);
+        }
+
+        LOG.info("Done...");
+    }
+
+    private static Options buildOptions() {
+        Options options = new Options();
+        options.addOption(OPTION_NAMESPACE_SHORT, OPTION_NAMESPACE_LONG, true, "REQUIRED the list of namespaces to migrate.");
+        options.addOption(OPTION_REDIS_HOST_SHORT, OPTION_REDIS_HOST_LONG, true, "Redis hostname (default: localhost)");
+        options.addOption(OPTION_REDIS_PORT_SHORT, OPTION_REDIS_PORT_LONG, true, "Redis port (default: 6379)");
+        options.addOption(null, OPTION_REDIS_PASSWORD_LONG, true, "Redis password (default: no password)");
+        options.addOption(OPTION_REDIS_DB_NUM_SHORT, OPTION_REDIS_DB_NUM_LONG, true, "Redis DB number (default: 0)");
+        return options;
+    }
+
+    private static void printUsageAndExit(Options options, String message) {
+        LOG.error(message);
+        HelpFormatter formatter = new HelpFormatter();
+        formatter.printHelp("Base64ToBinaryStateMigrationUtil <options>", options);
+        System.exit(1);
+    }
+
+}
diff --git a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/LookupWordCount.java b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/LookupWordCount.java
new file mode 100644
index 00000000000..243f1bffc46
--- /dev/null
+++ b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/LookupWordCount.java
@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.redis.topology;
+
+import com.google.common.collect.Lists;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.redis.bolt.RedisLookupBolt;
+import org.apache.storm.redis.common.config.JedisPoolConfig;
+import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
+import org.apache.storm.redis.common.mapper.RedisLookupMapper;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.ITuple;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class LookupWordCount {
+    private static final String WORD_SPOUT = "WORD_SPOUT";
+    private static final String LOOKUP_BOLT = "LOOKUP_BOLT";
+    private static final String PRINT_BOLT = "PRINT_BOLT";
+
+    private static final String TEST_REDIS_HOST = "127.0.0.1";
+    private static final int TEST_REDIS_PORT = 6379;
+
+    public static class PrintWordTotalCountBolt extends BaseRichBolt {
+        private static final Logger LOG = LoggerFactory.getLogger(PrintWordTotalCountBolt.class);
+        private static final Random RANDOM = new Random();
+        private OutputCollector collector;
+
+        @Override
+        public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
+            this.collector = collector;
+        }
+
+        @Override
+        public void execute(Tuple input) {
+            String wordName = input.getStringByField("wordName");
+            String countStr = input.getStringByField("count");
+
+            // print lookup result with low probability
+            if (RANDOM.nextInt(1000) > 995) {
+                int count = 0;
+                if (countStr != null) {
+                    count = Integer.parseInt(countStr);
+                }
+                LOG.info("Lookup result - word : " + wordName + " / count : " + count);
+            }
+
+            collector.ack(input);
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        }
+    }
+
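+    // Wiring below: WordSpout emits random words, RedisLookupBolt reads each word's
+    // count from the "wordCount" Redis hash, and PrintWordTotalCountBolt samples the
+    // results for logging; shuffle groupings suffice since the lookup is stateless.
+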
+    public static void main(String[] args) throws Exception {
+        String host = TEST_REDIS_HOST;
+        int port = TEST_REDIS_PORT;
+
+        if (args.length >= 2) {
+            host = args[0];
+            port = Integer.parseInt(args[1]);
+        }
+
+        JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()
+                .setHost(host).setPort(port).build();
+
+        WordSpout spout = new WordSpout();
+        RedisLookupMapper lookupMapper = setupLookupMapper();
+        RedisLookupBolt lookupBolt = new RedisLookupBolt(poolConfig, lookupMapper);
+
+        PrintWordTotalCountBolt printBolt = new PrintWordTotalCountBolt();
+
+        //wordspout -> lookupbolt
+        TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout(WORD_SPOUT, spout, 1);
+        builder.setBolt(LOOKUP_BOLT, lookupBolt, 1).shuffleGrouping(WORD_SPOUT);
+        builder.setBolt(PRINT_BOLT, printBolt, 1).shuffleGrouping(LOOKUP_BOLT);
+
+        String topoName = "test";
+        if (args.length == 3) {
+            topoName = args[2];
+        } else if (args.length > 3) {
+            System.out.println("Usage: LookupWordCount <redis host> <redis port> (topology name)");
+            return;
+        }
+        Config config = new Config();
+        StormSubmitter.submitTopology(topoName, config, builder.createTopology());
+    }
+
+    private static RedisLookupMapper setupLookupMapper() {
+        return new WordCountRedisLookupMapper();
+    }
+
+    private static class WordCountRedisLookupMapper implements RedisLookupMapper {
+        private RedisDataTypeDescription description;
+        private final String hashKey = "wordCount";
+
+        WordCountRedisLookupMapper() {
+            description = new RedisDataTypeDescription(
+                    RedisDataTypeDescription.RedisDataType.HASH, hashKey);
+        }
+
+        @Override
+        public List<Values> toTuple(ITuple input, Object value) {
+            String member = getKeyFromTuple(input);
+            List<Values> values = Lists.newArrayList();
+            values.add(new Values(member, value));
+            return values;
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("wordName", "count"));
+        }
+
+        @Override
+        public RedisDataTypeDescription getDataTypeDescription() {
+            return description;
+        }
+
+        @Override
+        public String getKeyFromTuple(ITuple tuple) {
+            return tuple.getStringByField("word");
+        }
+
+        @Override
+        public String getValueFromTuple(ITuple tuple) {
+            return null;
+        }
+    }
+}
diff --git a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/PersistentWordCount.java b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/PersistentWordCount.java
new file mode 100644
index 00000000000..93f790b9921
--- /dev/null
+++ b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/PersistentWordCount.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.redis.topology;
+
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.redis.bolt.RedisStoreBolt;
+import org.apache.storm.redis.common.config.JedisPoolConfig;
+import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
+import org.apache.storm.redis.common.mapper.RedisStoreMapper;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.ITuple;
+
+public class PersistentWordCount {
+    private static final String WORD_SPOUT = "WORD_SPOUT";
+    private static final String COUNT_BOLT = "COUNT_BOLT";
+    private static final String STORE_BOLT = "STORE_BOLT";
+
+    private static final String TEST_REDIS_HOST = "127.0.0.1";
+    private static final int TEST_REDIS_PORT = 6379;
+
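+    // fieldsGrouping on "word" sends every occurrence of a word to the same
+    // WordCounter task, so its in-memory count stays consistent; RedisStoreBolt
+    // then persists each (word, count) pair into the "wordCount" hash via the
+    // store mapper below.
+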
+    public static void main(String[] args) throws Exception {
+        String host = TEST_REDIS_HOST;
+        int port = TEST_REDIS_PORT;
+
+        if (args.length >= 2) {
+            host = args[0];
+            port = Integer.parseInt(args[1]);
+        }
+
+        JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()
+                .setHost(host).setPort(port).build();
+
+        WordSpout spout = new WordSpout();
+        WordCounter bolt = new WordCounter();
+        RedisStoreMapper storeMapper = setupStoreMapper();
+        RedisStoreBolt storeBolt = new RedisStoreBolt(poolConfig, storeMapper);
+
+        // wordSpout ==> countBolt ==> RedisBolt
+        TopologyBuilder builder = new TopologyBuilder();
+
+        builder.setSpout(WORD_SPOUT, spout, 1);
+        builder.setBolt(COUNT_BOLT, bolt, 1).fieldsGrouping(WORD_SPOUT, new Fields("word"));
+        builder.setBolt(STORE_BOLT, storeBolt, 1).shuffleGrouping(COUNT_BOLT);
+
+        String topoName = "test";
+        if (args.length == 3) {
+            topoName = args[2];
+        } else if (args.length > 3) {
+            System.out.println("Usage: PersistentWordCount <redis host> <redis port> (topology name)");
+            return;
+        }
+        Config config = new Config();
+        StormSubmitter.submitTopology(topoName, config, builder.createTopology());
+    }
+
+    private static RedisStoreMapper setupStoreMapper() {
+        return new WordCountStoreMapper();
+    }
+
+    private static class WordCountStoreMapper implements RedisStoreMapper {
+        private RedisDataTypeDescription description;
+        private final String hashKey = "wordCount";
+
+        WordCountStoreMapper() {
+            description = new RedisDataTypeDescription(
+                    RedisDataTypeDescription.RedisDataType.HASH, hashKey);
+        }
+
+        @Override
+        public RedisDataTypeDescription getDataTypeDescription() {
+            return description;
+        }
+
+        @Override
+        public String getKeyFromTuple(ITuple tuple) {
+            return tuple.getStringByField("word");
+        }
+
+        @Override
+        public String getValueFromTuple(ITuple tuple) {
+            return tuple.getStringByField("count");
+        }
+    }
+}
diff --git a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/WhitelistWordCount.java b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/WhitelistWordCount.java
new file mode 100644
index 00000000000..5b47f0cd51e
--- /dev/null
+++ b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/WhitelistWordCount.java
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.redis.topology;
+
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.redis.bolt.RedisFilterBolt;
+import org.apache.storm.redis.common.config.JedisPoolConfig;
+import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
+import org.apache.storm.redis.common.mapper.RedisFilterMapper;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.ITuple;
+import org.apache.storm.tuple.Tuple;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class WhitelistWordCount {
+    private static final String WORD_SPOUT = "WORD_SPOUT";
+    private static final String WHITELIST_BOLT = "WHITELIST_BOLT";
+    private static final String COUNT_BOLT = "COUNT_BOLT";
+    private static final String PRINT_BOLT = "PRINT_BOLT";
+
+    private static final String TEST_REDIS_HOST = "127.0.0.1";
+    private static final int TEST_REDIS_PORT = 6379;
+
+    public static class PrintWordTotalCountBolt extends BaseRichBolt {
+        private static final Logger LOG = LoggerFactory.getLogger(PrintWordTotalCountBolt.class);
+        private static final Random RANDOM = new Random();
+        private OutputCollector collector;
+
+        @Override
+        public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
+            this.collector = collector;
+        }
+
+        @Override
+        public void execute(Tuple input) {
+            String wordName = input.getStringByField("word");
+            String countStr = input.getStringByField("count");
+
+            // print count result with low probability
+            if (RANDOM.nextInt(1000) > 995) {
+                int count = 0;
+                if (countStr != null) {
+                    count = Integer.parseInt(countStr);
+                }
+                LOG.info("Count result - word : " + wordName + " / count : " + count);
+            }
+
+            collector.ack(input);
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        }
+    }
+
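+    // RedisFilterBolt drops words that are not members of the Redis set "whitelist",
+    // so only whitelisted words reach the counter and the sampling print bolt.
+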
+    public static void main(String[] args) throws Exception {
+        String host = TEST_REDIS_HOST;
+        int port = TEST_REDIS_PORT;
+
+        if (args.length >= 2) {
+            host = args[0];
+            port = Integer.parseInt(args[1]);
+        }
+
+        JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()
+                .setHost(host).setPort(port).build();
+
+        WordSpout spout = new WordSpout();
+        RedisFilterMapper filterMapper = setupWhitelistMapper();
+        RedisFilterBolt whitelistBolt = new RedisFilterBolt(poolConfig, filterMapper);
+        WordCounter wordCounterBolt = new WordCounter();
+
+        TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout(WORD_SPOUT, spout, 1);
+        builder.setBolt(WHITELIST_BOLT, whitelistBolt, 1).shuffleGrouping(WORD_SPOUT);
+        builder.setBolt(COUNT_BOLT, wordCounterBolt, 1).fieldsGrouping(WHITELIST_BOLT, new Fields("word"));
+        PrintWordTotalCountBolt printBolt = new PrintWordTotalCountBolt();
+        builder.setBolt(PRINT_BOLT, printBolt, 1).shuffleGrouping(COUNT_BOLT);
+
+        String topoName = "test";
+        if (args.length == 3) {
+            topoName = args[2];
+        } else if (args.length > 3) {
+            System.out.println("Usage: WhitelistWordCount <redis host> <redis port> [topology name]");
+            return;
+        }
+        Config config = new Config();
+        StormSubmitter.submitTopology(topoName, config, builder.createTopology());
+    }
+
+    private static RedisFilterMapper setupWhitelistMapper() {
+        return new WhitelistWordFilterMapper();
+    }
+
+    private static class WhitelistWordFilterMapper implements RedisFilterMapper {
+        private RedisDataTypeDescription description;
+        private final String setKey = "whitelist";
+
+        WhitelistWordFilterMapper() {
+            description = new RedisDataTypeDescription(
+                    RedisDataTypeDescription.RedisDataType.SET, setKey);
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("word"));
+        }
+
+        @Override
+        public RedisDataTypeDescription getDataTypeDescription() {
+            return description;
+        }
+
+        @Override
+        public String getKeyFromTuple(ITuple tuple) {
+            return tuple.getStringByField("word");
+        }
+
+        @Override
+        public String getValueFromTuple(ITuple tuple) {
+            return null;
+        }
+    }
+}
diff --git a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/WordCounter.java b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/WordCounter.java
new file mode 100644
index 00000000000..a0b9714c804
--- /dev/null
+++ b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/WordCounter.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.redis.topology;
+
+import com.google.common.collect.Maps;
+
+import java.util.Map;
+
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.IBasicBolt;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+
+public class WordCounter implements IBasicBolt {
+    private Map<String, Integer> wordCounter = Maps.newHashMap();
+
+    @Override
+    public void prepare(Map<String, Object> topoConf, TopologyContext context) {
+    }
+
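+    // Counts live in task-local memory; they are emitted as strings because the
+    // downstream bolts and mappers read the "count" field via getStringByField.
+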
+    @Override
+    public void execute(Tuple input, BasicOutputCollector collector) {
+        String word = input.getStringByField("word");
+        int count;
+        if (wordCounter.containsKey(word)) {
+            count = wordCounter.get(word) + 1;
+        } else {
+            count = 1;
+        }
+
+        wordCounter.put(word, count);
+        collector.emit(new Values(word, String.valueOf(count)));
+    }
+
+    @Override
+    public void cleanup() {
+
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("word", "count"));
+    }
+
+    @Override
+    public Map<String, Object> getComponentConfiguration() {
+        return null;
+    }
+
+}
diff --git a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/WordSpout.java b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/WordSpout.java
new file mode 100644
index 00000000000..ff3ec39101b
--- /dev/null
+++ b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/WordSpout.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.redis.topology;
+
+import java.util.Map;
+import java.util.Random;
+import java.util.UUID;
+
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.IRichSpout;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+
+public class WordSpout implements IRichSpout {
+    boolean isDistributed;
+    SpoutOutputCollector collector;
+    public static final String[] words = new String[] { "apple", "orange", "pineapple", "banana", "watermelon" };
+
+    public WordSpout() {
+        this(true);
+    }
+
+    public WordSpout(boolean isDistributed) {
+        this.isDistributed = isDistributed;
+    }
+
+    public boolean isDistributed() {
+        return this.isDistributed;
+    }
+
+    @Override
+    public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) {
+        this.collector = collector;
+    }
+
+    @Override
+    public void close() {
+
+    }
+
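+    // Emits one random word per call, using a fresh UUID as the message id so the
+    // acker can track each tuple; Thread.yield() keeps this demo spout from
+    // hogging its executor thread.
+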
+    @Override
+    public void nextTuple() {
+        final Random rand = new Random();
+        final String word = words[rand.nextInt(words.length)];
+        this.collector.emit(new Values(word), UUID.randomUUID());
+        Thread.yield();
+    }
+
+    @Override
+    public void ack(Object msgId) {
+
+    }
+
+    @Override
+    public void fail(Object msgId) {
+
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("word"));
+    }
+
+    @Override
+    public void activate() {
+    }
+
+    @Override
+    public void deactivate() {
+    }
+
+    @Override
+    public Map<String, Object> getComponentConfiguration() {
+        return null;
+    }
+}
diff --git a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/PrintFunction.java b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/PrintFunction.java
new file mode 100644
index 00000000000..f32cfe8e037
--- /dev/null
+++ b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/PrintFunction.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.redis.trident;
+
+import java.util.Random;
+
+import org.apache.storm.trident.operation.BaseFunction;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.tuple.TridentTuple;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class PrintFunction extends BaseFunction {
+
+    private static final Logger LOG = LoggerFactory.getLogger(PrintFunction.class);
+
+    private static final Random RANDOM = new Random();
+
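+    // nextInt(1000) > 995 matches 996..999, so roughly 0.4% of tuples are logged,
+    // keeping worker logs readable while the cycling spout runs.
+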
+    @Override
+    public void execute(TridentTuple tuple, TridentCollector tridentCollector) {
+        if (RANDOM.nextInt(1000) > 995) {
+            LOG.info(tuple.toString());
+        }
+    }
+}
diff --git a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountLookupMapper.java b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountLookupMapper.java
new file mode 100644
index 00000000000..04874d5f13c
--- /dev/null
+++ b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountLookupMapper.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.redis.trident;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
+import org.apache.storm.redis.common.mapper.RedisLookupMapper;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.ITuple;
+import org.apache.storm.tuple.Values;
+
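+// Lookup-side mapper for the Trident example: state lives in the Redis hash
+// "test", keyed by "test_<word>", and query results are emitted as
+// ("word", "value") tuples.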
+public class WordCountLookupMapper implements RedisLookupMapper {
+    @Override
+    public List<Values> toTuple(ITuple input, Object value) {
+        List<Values> values = new ArrayList<>();
+        values.add(new Values(getKeyFromTuple(input), value));
+        return values;
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("word", "value"));
+    }
+
+    @Override
+    public RedisDataTypeDescription getDataTypeDescription() {
+        return new RedisDataTypeDescription(RedisDataTypeDescription.RedisDataType.HASH, "test");
+    }
+
+    @Override
+    public String getKeyFromTuple(ITuple tuple) {
+        return "test_" + tuple.getString(0);
+    }
+
+    @Override
+    public String getValueFromTuple(ITuple tuple) {
+        return tuple.getInteger(1).toString();
+    }
+}
diff --git a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountStoreMapper.java b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountStoreMapper.java
new file mode 100644
index 00000000000..84538e232e4
--- /dev/null
+++ b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountStoreMapper.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.redis.trident;
+
+import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
+import org.apache.storm.redis.common.mapper.RedisStoreMapper;
+import org.apache.storm.tuple.ITuple;
+
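+// Store-side counterpart of WordCountLookupMapper: writes each (word, count)
+// tuple into the Redis hash "test" under the key "test_<word>".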
+ */ + +package org.apache.storm.redis.trident; + +import org.apache.storm.redis.common.mapper.RedisDataTypeDescription; +import org.apache.storm.redis.common.mapper.RedisStoreMapper; +import org.apache.storm.tuple.ITuple; + +public class WordCountStoreMapper implements RedisStoreMapper { + @Override + public RedisDataTypeDescription getDataTypeDescription() { + return new RedisDataTypeDescription(RedisDataTypeDescription.RedisDataType.HASH, "test"); + } + + @Override + public String getKeyFromTuple(ITuple tuple) { + return "test_" + tuple.getString(0); + } + + @Override + public String getValueFromTuple(ITuple tuple) { + return tuple.getInteger(1).toString(); + } +} diff --git a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountTridentRedis.java b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountTridentRedis.java new file mode 100644 index 00000000000..1e9991fd3aa --- /dev/null +++ b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountTridentRedis.java @@ -0,0 +1,87 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.storm.redis.trident;
+
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.redis.common.config.JedisPoolConfig;
+import org.apache.storm.redis.common.mapper.RedisLookupMapper;
+import org.apache.storm.redis.common.mapper.RedisStoreMapper;
+import org.apache.storm.redis.trident.state.RedisState;
+import org.apache.storm.redis.trident.state.RedisStateQuerier;
+import org.apache.storm.redis.trident.state.RedisStateUpdater;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.testing.FixedBatchSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+
+public class WordCountTridentRedis {
+
+    public static StormTopology buildTopology(String redisHost, Integer redisPort) {
+        Fields fields = new Fields("word", "count");
+        FixedBatchSpout spout = new FixedBatchSpout(fields, 4,
+                new Values("storm", 1),
+                new Values("trident", 1),
+                new Values("needs", 1),
+                new Values("javadoc", 1)
+        );
+        spout.setCycle(true);
+
+        JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()
+                .setHost(redisHost).setPort(redisPort)
+                .build();
+
+        RedisStoreMapper storeMapper = new WordCountStoreMapper();
+        RedisLookupMapper lookupMapper = new WordCountLookupMapper();
+        RedisState.Factory factory = new RedisState.Factory(poolConfig);
+
+        TridentTopology topology = new TridentTopology();
+        Stream stream = topology.newStream("spout1", spout);
+
+        stream.partitionPersist(factory,
+                fields,
+                new RedisStateUpdater(storeMapper).withExpire(86400000),
+                new Fields());
+
+        TridentState state = topology.newStaticState(factory);
+        stream = stream.stateQuery(state, new Fields("word"),
+                new RedisStateQuerier(lookupMapper),
+                new Fields("columnName", "columnValue"));
+        stream.each(new Fields("word", "columnValue"), new PrintFunction(), new Fields());
+        return topology.build();
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length != 2) {
+            System.out.println("Usage: WordCountTridentRedis redis-host redis-port");
+            System.exit(1);
+        }
+
+        String redisHost = args[0];
+        Integer redisPort = Integer.valueOf(args[1]);
+
+        Config conf = new Config();
+        conf.setMaxSpoutPending(5);
+        conf.setNumWorkers(3);
+        StormSubmitter.submitTopology("test_wordCounter_for_redis", conf, buildTopology(redisHost, redisPort));
+    }
+}
diff --git a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountTridentRedisCluster.java b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountTridentRedisCluster.java
new file mode 100644
index 00000000000..7ced2afdadf
--- /dev/null
+++ b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountTridentRedisCluster.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.redis.trident;
+
+import java.net.InetSocketAddress;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.redis.common.config.JedisClusterConfig;
+import org.apache.storm.redis.common.mapper.RedisLookupMapper;
+import org.apache.storm.redis.common.mapper.RedisStoreMapper;
+import org.apache.storm.redis.trident.state.RedisClusterState;
+import org.apache.storm.redis.trident.state.RedisClusterStateQuerier;
+import org.apache.storm.redis.trident.state.RedisClusterStateUpdater;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.testing.FixedBatchSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+
+public class WordCountTridentRedisCluster {
+
+    public static StormTopology buildTopology(String redisHostPort) {
+        Fields fields = new Fields("word", "count");
+        FixedBatchSpout spout = new FixedBatchSpout(fields, 4,
+                new Values("storm", 1),
+                new Values("trident", 1),
+                new Values("needs", 1),
+                new Values("javadoc", 1)
+        );
+        spout.setCycle(true);
+
+        Set<InetSocketAddress> nodes = new HashSet<>();
+        for (String hostPort : redisHostPort.split(",")) {
+            String[] hostPortSplit = hostPort.split(":");
+            nodes.add(new InetSocketAddress(hostPortSplit[0], Integer.valueOf(hostPortSplit[1])));
+        }
+        JedisClusterConfig clusterConfig = new JedisClusterConfig.Builder().setNodes(nodes)
+                .build();
+
+        RedisStoreMapper storeMapper = new WordCountStoreMapper();
+        RedisLookupMapper lookupMapper = new WordCountLookupMapper();
+        RedisClusterState.Factory factory = new RedisClusterState.Factory(clusterConfig);
+
+        TridentTopology topology = new TridentTopology();
+        Stream stream = topology.newStream("spout1", spout);
+
+        stream.partitionPersist(factory,
+                fields,
+                new RedisClusterStateUpdater(storeMapper).withExpire(86400000),
+                new Fields());
+
+        TridentState state = topology.newStaticState(factory);
+        stream = stream.stateQuery(state, new Fields("word"),
+                new RedisClusterStateQuerier(lookupMapper),
+                new Fields("columnName", "columnValue"));
+        stream.each(new Fields("word", "columnValue"), new PrintFunction(), new Fields());
+        return topology.build();
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length != 1) {
+            System.out.println("Usage: WordCountTridentRedisCluster 127.0.0.1:6379,127.0.0.1:6380");
+            System.exit(1);
+        }
+
+        String redisHostPort = args[0];
+
+        Config conf = new Config();
+        conf.setMaxSpoutPending(5);
+        conf.setNumWorkers(3);
+        StormSubmitter.submitTopology("test_wordCounter_for_redis", conf, buildTopology(redisHostPort));
+    }
+
+}
diff --git a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountTridentRedisClusterMap.java b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountTridentRedisClusterMap.java
new file mode 100644
index 00000000000..b1425c9bf34
--- /dev/null
+++ b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountTridentRedisClusterMap.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.redis.trident;
+
+import java.net.InetSocketAddress;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.redis.common.config.JedisClusterConfig;
+import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
+import org.apache.storm.redis.trident.state.RedisClusterMapState;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.operation.builtin.MapGet;
+import org.apache.storm.trident.operation.builtin.Sum;
+import org.apache.storm.trident.state.StateFactory;
+import org.apache.storm.trident.testing.FixedBatchSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+
+public class WordCountTridentRedisClusterMap {
+
+    public static StormTopology buildTopology(String redisHostPort) {
+        Fields fields = new Fields("word", "count");
+        FixedBatchSpout spout = new FixedBatchSpout(fields, 4,
+                new Values("storm", 1),
+                new Values("trident", 1),
+                new Values("needs", 1),
+                new Values("javadoc", 1)
+        );
+        spout.setCycle(true);
+
+        Set<InetSocketAddress> nodes = new HashSet<>();
+        for (String hostPort : redisHostPort.split(",")) {
+            String[] hostPortSplit = hostPort.split(":");
+            nodes.add(new InetSocketAddress(hostPortSplit[0], Integer.valueOf(hostPortSplit[1])));
+        }
+        JedisClusterConfig clusterConfig = new JedisClusterConfig.Builder().setNodes(nodes)
+                .build();
+        RedisDataTypeDescription dataTypeDescription = new RedisDataTypeDescription(
+                RedisDataTypeDescription.RedisDataType.HASH, "test");
+        StateFactory factory = RedisClusterMapState.transactional(clusterConfig, dataTypeDescription);
+
+        TridentTopology topology = new TridentTopology();
+        Stream stream = topology.newStream("spout1", spout);
+
+        TridentState state = stream.groupBy(new Fields("word"))
+                .persistentAggregate(factory, new Fields("count"), new Sum(), new Fields("sum"));
+
+        stream.stateQuery(state, new Fields("word"), new MapGet(), new Fields("sum"))
+                .each(new Fields("word", "sum"), new PrintFunction(), new Fields());
+        return topology.build();
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length != 1) {
+            System.out.println("Usage: WordCountTridentRedisClusterMap 127.0.0.1:6379,127.0.0.1:6380");
+            System.exit(1);
+        }
+
+        String redisHostPort = args[0];
+
+        Config conf = new Config();
+        conf.setMaxSpoutPending(5);
+        conf.setNumWorkers(3);
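+        // maxSpoutPending caps the number of un-acked Trident batches in flight
+        // per spout task; numWorkers requests three worker JVMs for the topology.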
+        StormSubmitter.submitTopology("test_wordCounter_for_redis", conf, buildTopology(redisHostPort));
+    }
+
+}
diff --git a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountTridentRedisMap.java b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountTridentRedisMap.java
new file mode 100644
index 00000000000..2a7af97f3b8
--- /dev/null
+++ b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/trident/WordCountTridentRedisMap.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.redis.trident;
+
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.redis.common.config.JedisPoolConfig;
+import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
+import org.apache.storm.redis.trident.state.RedisMapState;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.operation.builtin.MapGet;
+import org.apache.storm.trident.operation.builtin.Sum;
+import org.apache.storm.trident.state.StateFactory;
+import org.apache.storm.trident.testing.FixedBatchSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+
+public class WordCountTridentRedisMap {
+
+    public static StormTopology buildTopology(String redisHost, Integer redisPort) {
+        Fields fields = new Fields("word", "count");
+        FixedBatchSpout spout = new FixedBatchSpout(fields, 4,
+                new Values("storm", 1),
+                new Values("trident", 1),
+                new Values("needs", 1),
+                new Values("javadoc", 1)
+        );
+        spout.setCycle(true);
+
+        JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()
+                .setHost(redisHost).setPort(redisPort)
+                .build();
+
+        RedisDataTypeDescription dataTypeDescription = new RedisDataTypeDescription(
+                RedisDataTypeDescription.RedisDataType.HASH, "test");
+        StateFactory factory = RedisMapState.transactional(poolConfig, dataTypeDescription);
+
+        TridentTopology topology = new TridentTopology();
+        Stream stream = topology.newStream("spout1", spout);
+
+        TridentState state = stream.groupBy(new Fields("word"))
+                .persistentAggregate(factory, new Fields("count"), new Sum(), new Fields("sum"));
+
+        stream.stateQuery(state, new Fields("word"), new MapGet(), new Fields("sum"))
+                .each(new Fields("word", "sum"), new PrintFunction(), new Fields());
+        return topology.build();
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length != 2) {
+            System.out.println("Usage: WordCountTridentRedisMap redis-host redis-port");
+            System.exit(1);
+        }
+
+        String redisHost = args[0];
+        Integer redisPort = Integer.valueOf(args[1]);
+
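+        // From here the flow matches WordCountTridentRedis: build a Config and
+        // submit. The difference is in buildTopology above, which uses a
+        // transactional RedisMapState with persistentAggregate instead of
+        // partitionPersist.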
+        Config conf = new Config();
+        conf.setMaxSpoutPending(5);
+        conf.setNumWorkers(3);
+        StormSubmitter.submitTopology("test_wordCounter_for_redis", conf, buildTopology(redisHost, redisPort));
+    }
+
+}
diff --git a/examples/storm-starter/README.markdown b/examples/storm-starter/README.markdown
index 98a9749b197..2d6fe202903 100644
--- a/examples/storm-starter/README.markdown
+++ b/examples/storm-starter/README.markdown
@@ -22,11 +22,11 @@ Table of Contents
 First, you need `java` and `git` installed and in your user's `PATH`. Also, two of the examples in storm-starter
 require Python and Ruby.
 
-Next, make sure you have the storm-starter code available on your machine. Git/GitHub beginners may want to use the
+Next, make sure you have the storm-starter code available on your machine. If you have already downloaded storm from http://storm.apache.org/downloads.html then you will find the storm-starter code under your `apache-storm-<version>/examples/` directory. Alternatively, Git/GitHub beginners may want to use the
 following command to download the latest storm-starter code and change to the new directory that contains the downloaded
-code.
+code, but make sure the checked-out code matches the version of `storm` you are running.
 
-    $ git clone git://github.com/apache/incubator-storm.git && cd incubator-storm/examples/storm-starter
+    $ git clone git://github.com/apache/storm.git && cd storm/examples/storm-starter
 
 ## storm-starter overview
 
@@ -35,17 +35,18 @@ code.
 storm-starter contains a variety of examples of using Storm. If this is your first time working with Storm, check out
 these topologies first:
 
-1. [ExclamationTopology](src/jvm/storm/starter/ExclamationTopology.java): Basic topology written in all Java
-2. [WordCountTopology](src/jvm/storm/starter/WordCountTopology.java): Basic topology that makes use of multilang by
+1. [ExclamationTopology](src/jvm/org/apache/storm/starter/ExclamationTopology.java): Basic topology written entirely in Java
+2. [WordCountTopology](src/jvm/org/apache/storm/starter/WordCountTopology.java): Basic topology that makes use of multilang by
    implementing one bolt in Python
-3. [ReachTopology](src/jvm/storm/starter/ReachTopology.java): Example of complex DRPC on top of Storm
+3. [ReachTopology](src/jvm/org/apache/storm/starter/ReachTopology.java): Example of complex DRPC on top of Storm
+4. [LambdaTopology](src/jvm/org/apache/storm/starter/LambdaTopology.java): Example of writing a spout/bolt using Java 8 lambda expressions
 
 After you have familiarized yourself with these topologies, take a look at the other topologies in
-[src/jvm/storm/starter/](src/jvm/storm/starter/) such as [RollingTopWords](src/jvm/storm/starter/RollingTopWords.java)
+[src/jvm/org/apache/storm/starter/](src/jvm/org/apache/storm/starter/) such as [RollingTopWords](src/jvm/org/apache/storm/starter/RollingTopWords.java)
 for more advanced implementations.
 
 If you want to learn more about how Storm works, please head over to the
-[Storm project page](http://storm.incubator.apache.org).
+[Storm project page](http://storm.apache.org).
 
@@ -72,30 +73,32 @@ the Maven command to build and run storm-starter (see below), Maven will then be
 of Storm in this local Maven repository at `$HOME/.m2/repository`.
 
-## Running topologies with Maven
+## Packaging storm-starter for use on a Storm cluster
 
-storm-starter topologies can be run with the maven-exec-plugin. For example, to
-compile and run `WordCountTopology` in local mode, use the command:
-
-    $ mvn compile exec:java -Dstorm.topology=storm.starter.WordCountTopology
+You can package a jar suitable for submitting to a Storm cluster with the command:
 
-You can also run clojure topologies with Maven:
+    $ mvn package
 
-    $ mvn compile exec:java -Dstorm.topology=storm.starter.clj.word_count
+This will package your code and all the non-Storm dependencies into a single "uberjar" (or "fat jar") at the path
+`target/storm-starter-{version}.jar`.
 
-In Windows parameter should be quoted, like this:
+Example filename of the uberjar:
 
-    $ mvn compile exec:java "-Dstorm.topology=storm.starter.clj.word_count"
+    >>> target/storm-starter-0.9.3-incubating-SNAPSHOT.jar
 
+You can submit (run) a topology contained in this uberjar to Storm via the `storm` CLI tool:
 
-## Packaging storm-starter for use on a Storm cluster
+    # Example 1: Run the ExclamationTopology in local mode (LocalCluster)
+    $ storm jar target/storm-starter-*.jar org.apache.storm.starter.ExclamationTopology -local
 
-You can package a jar suitable for submitting to a Storm cluster with the command:
-
-    $ mvn package
+    # Example 2: Run the RollingTopWords in remote/cluster mode,
+    # under the name "production-topology"
+    $ storm jar target/storm-starter-*.jar org.apache.storm.starter.RollingTopWords production-topology
 
-This will package your code and all the non-Storm dependencies into a single "uberjar" at the path
-`target/storm-starter-{version}-jar-with-dependencies.jar`.
+Submitting the uberjar this way also lets you run topologies that use multilang, for example `WordCountTopology`.
 
+### Submitting a topology in local vs. remote mode
+You can also submit any topology in local mode via the `storm local` command, which works much like the `storm jar` command described above. If you need to run the examples from your IDE, you will need to modify the project slightly: add a dependency on the `storm-server` module, and use `LocalCluster` to create a local cluster you can submit your topology to. Please see the documentation describing [Local Mode](https://github.com/apache/storm/blob/master/docs/Local-mode.md) for a complete overview of this topic.
 
 ## Running unit tests
 
@@ -115,13 +118,14 @@ The following instructions will import storm-starter as a new project in Intelli
 
 * Open _File > Import Project..._ and navigate to the storm-starter directory of your storm clone (e.g.
-  `~/git/incubator-storm/examples/storm-starter`).
+  `~/git/storm/examples/storm-starter`).
 * Select _Import project from external model_, select "Maven", and click _Next_.
 * In the following screen, enable the checkbox _Import Maven projects automatically_. Leave all other values at their
   defaults. Click _Next_.
+* Make sure to select the *intellij* profile in the profiles screen. This is important for making sure dependencies are set up correctly.
 * Click _Next_ on the following screen about selecting Maven projects to import.
 * Select the JDK to be used by IDEA for storm-starter, then click _Next_.
-  * At the time of this writing you should use JDK 6.
-  * It is strongly recommended to use Sun/Oracle JDK 6 rather than OpenJDK 6.
+  * At the time of this writing you should use JDK 7 or above.
+  * It is strongly recommended to use Oracle JDK rather than OpenJDK.
 * You may now optionally change the name of the project in IDEA. The default name suggested by IDEA is
   "storm-starter". Click _Finish_ once you are done.
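To make the `storm local` path described in the README change above concrete, one possible end-to-end sequence looks like this (the trailing `demo` argument is just an illustrative topology name, and the wildcard assumes a single uberjar in `target/`):

    # Build the uberjar, then run the multilang word count in a simulated local cluster
    $ mvn package
    $ storm local target/storm-starter-*.jar org.apache.storm.starter.WordCountTopology demo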
diff --git a/examples/storm-starter/multilang/resources/asyncSplitsentence.js b/examples/storm-starter/multilang/resources/asyncSplitsentence.js new file mode 100644 index 00000000000..66fc75e71a2 --- /dev/null +++ b/examples/storm-starter/multilang/resources/asyncSplitsentence.js @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Example for async bolt. Receives sentence and breaks it into words. + * + */ + + +var storm = require('./storm'); +var BasicBolt = storm.BasicBolt; + +function SplitSentenceBolt() { + BasicBolt.call(this); +}; + +SplitSentenceBolt.prototype = Object.create(BasicBolt.prototype); +SplitSentenceBolt.prototype.constructor = SplitSentenceBolt; + +SplitSentenceBolt.prototype.process = function(tup, done) { + var self = this; + + // Here setTimeout is not really needed, we use it to demonstrate asynchronous code in the process method: + setTimeout(function() { + var words = tup.values[0].split(" "); + words.forEach(function(word) { + self.emit({tuple: [word], anchorTupleId: tup.id}, function(taskIds) { + self.log(word + ' sent to task ids - ' + taskIds); + }); + }); + done(); + }, 5000) +} + +new SplitSentenceBolt().run(); diff --git a/examples/storm-starter/multilang/resources/randomsentence.js b/examples/storm-starter/multilang/resources/randomsentence.js new file mode 100644 index 00000000000..7fcf5e12faf --- /dev/null +++ b/examples/storm-starter/multilang/resources/randomsentence.js @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Example for storm spout. Emits random sentences. + * The original class in java - org.apache.storm.starter.spout.RandomSentenceSpout. 
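+ * Demonstrates reliable emits from a multilang spout: emitted tuples are kept
+ * in a pending map keyed by tuple id, dropped on ack() and re-emitted on fail().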
+ * + */ + +var storm = require('./storm'); +var Spout = storm.Spout; + + +var SENTENCES = [ + "the cow jumped over the moon", + "an apple a day keeps the doctor away", + "four score and seven years ago", + "snow white and the seven dwarfs", + "i am at two with nature"] + +function RandomSentenceSpout(sentences) { + Spout.call(this); + this.runningTupleId = 0; + this.sentences = sentences; + this.pending = {}; +}; + +RandomSentenceSpout.prototype = Object.create(Spout.prototype); +RandomSentenceSpout.prototype.constructor = RandomSentenceSpout; + +RandomSentenceSpout.prototype.getRandomSentence = function() { + return this.sentences[getRandomInt(0, this.sentences.length - 1)]; +} + +RandomSentenceSpout.prototype.nextTuple = function(done) { + var self = this; + var sentence = this.getRandomSentence(); + var tup = [sentence]; + var id = this.createNextTupleId(); + this.pending[id] = tup; + //This timeout can be removed if TOPOLOGY_SLEEP_SPOUT_WAIT_STRATEGY_TIME_MS is configured to 100 + setTimeout(function() { + self.emit({tuple: tup, id: id}, function(taskIds) { + self.log(tup + ' sent to task ids - ' + taskIds); + }); + done(); + },100); +} + +RandomSentenceSpout.prototype.activate = function(done) { + done(); +} + +RandomSentenceSpout.prototype.deactivate = function(done) { + done(); +} + +RandomSentenceSpout.prototype.createNextTupleId = function() { + var id = this.runningTupleId; + this.runningTupleId++; + return id; +} + +RandomSentenceSpout.prototype.ack = function(id, done) { + this.log('Received ack for - ' + id); + delete this.pending[id]; + done(); +} + +RandomSentenceSpout.prototype.fail = function(id, done) { + var self = this; + this.log('Received fail for - ' + id + '. Retrying.'); + this.emit({tuple: this.pending[id], id:id}, function(taskIds) { + self.log(self.pending[id] + ' sent to task ids - ' + taskIds); + }); + done(); +} + +/** + * Returns a random integer between min (inclusive) and max (inclusive) + */ +function getRandomInt(min, max) { + return Math.floor(Math.random() * (max - min + 1)) + min; +} + +new RandomSentenceSpout(SENTENCES).run(); diff --git a/examples/storm-starter/multilang/resources/splitsentence.js b/examples/storm-starter/multilang/resources/splitsentence.js new file mode 100755 index 00000000000..e3a86365455 --- /dev/null +++ b/examples/storm-starter/multilang/resources/splitsentence.js @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Bolt example - receives sentence and breaks it into words. 
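+ * Each word is emitted with anchorTupleId set to the input tuple's id, so the
+ * emitted tuples stay anchored; done() then signals that processing is complete.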
+ */ + +var storm = require('./storm'); +var BasicBolt = storm.BasicBolt; + +function SplitSentenceBolt() { + BasicBolt.call(this); +}; + +SplitSentenceBolt.prototype = Object.create(BasicBolt.prototype); +SplitSentenceBolt.prototype.constructor = SplitSentenceBolt; + +SplitSentenceBolt.prototype.process = function(tup, done) { + var self = this; + var words = tup.values[0].split(" "); + words.forEach(function(word) { + self.emit({tuple: [word], anchorTupleId: tup.id}, function(taskIds) { + self.log(word + ' sent to task ids - ' + taskIds); + }); + }); + done(); +} + +new SplitSentenceBolt().run(); \ No newline at end of file diff --git a/examples/storm-starter/multilang/resources/storm.py b/examples/storm-starter/multilang/resources/storm.py deleted file mode 100644 index 4bc9e824860..00000000000 --- a/examples/storm-starter/multilang/resources/storm.py +++ /dev/null @@ -1,221 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import sys -import os -import traceback -from collections import deque - -try: - import simplejson as json -except ImportError: - import json - -json_encode = lambda x: json.dumps(x) -json_decode = lambda x: json.loads(x) - -#reads lines and reconstructs newlines appropriately -def readMsg(): - msg = "" - while True: - line = sys.stdin.readline()[0:-1] - if line == "end": - break - msg = msg + line + "\n" - return json_decode(msg[0:-1]) - -MODE = None -ANCHOR_TUPLE = None - -#queue up commands we read while trying to read taskids -pending_commands = deque() - -def readTaskIds(): - if pending_taskids: - return pending_taskids.popleft() - else: - msg = readMsg() - while type(msg) is not list: - pending_commands.append(msg) - msg = readMsg() - return msg - -#queue up taskids we read while trying to read commands/tuples -pending_taskids = deque() - -def readCommand(): - if pending_commands: - return pending_commands.popleft() - else: - msg = readMsg() - while type(msg) is list: - pending_taskids.append(msg) - msg = readMsg() - return msg - -def readTuple(): - cmd = readCommand() - return Tuple(cmd["id"], cmd["comp"], cmd["stream"], cmd["task"], cmd["tuple"]) - -def sendMsgToParent(msg): - print json_encode(msg) - print "end" - sys.stdout.flush() - -def sync(): - sendMsgToParent({'command':'sync'}) - -def sendpid(heartbeatdir): - pid = os.getpid() - sendMsgToParent({'pid':pid}) - open(heartbeatdir + "/" + str(pid), "w").close() - -def emit(*args, **kwargs): - __emit(*args, **kwargs) - return readTaskIds() - -def emitDirect(task, *args, **kwargs): - kwargs[directTask] = task - __emit(*args, **kwargs) - -def __emit(*args, **kwargs): - global MODE - if MODE == Bolt: - emitBolt(*args, **kwargs) - elif MODE == Spout: - emitSpout(*args, **kwargs) - -def emitBolt(tup, stream=None, anchors = [], directTask=None): - global ANCHOR_TUPLE - if 
ANCHOR_TUPLE is not None: - anchors = [ANCHOR_TUPLE] - m = {"command": "emit"} - if stream is not None: - m["stream"] = stream - m["anchors"] = map(lambda a: a.id, anchors) - if directTask is not None: - m["task"] = directTask - m["tuple"] = tup - sendMsgToParent(m) - -def emitSpout(tup, stream=None, id=None, directTask=None): - m = {"command": "emit"} - if id is not None: - m["id"] = id - if stream is not None: - m["stream"] = stream - if directTask is not None: - m["task"] = directTask - m["tuple"] = tup - sendMsgToParent(m) - -def ack(tup): - sendMsgToParent({"command": "ack", "id": tup.id}) - -def fail(tup): - sendMsgToParent({"command": "fail", "id": tup.id}) - -def log(msg): - sendMsgToParent({"command": "log", "msg": msg}) - -def initComponent(): - setupInfo = readMsg() - sendpid(setupInfo['pidDir']) - return [setupInfo['conf'], setupInfo['context']] - -class Tuple: - def __init__(self, id, component, stream, task, values): - self.id = id - self.component = component - self.stream = stream - self.task = task - self.values = values - - def __repr__(self): - return '<%s%s>' % ( - self.__class__.__name__, - ''.join(' %s=%r' % (k, self.__dict__[k]) for k in sorted(self.__dict__.keys()))) - -class Bolt: - def initialize(self, stormconf, context): - pass - - def process(self, tuple): - pass - - def run(self): - global MODE - MODE = Bolt - conf, context = initComponent() - self.initialize(conf, context) - try: - while True: - tup = readTuple() - self.process(tup) - except Exception, e: - log(traceback.format_exc(e)) - -class BasicBolt: - def initialize(self, stormconf, context): - pass - - def process(self, tuple): - pass - - def run(self): - global MODE - MODE = Bolt - global ANCHOR_TUPLE - conf, context = initComponent() - self.initialize(conf, context) - try: - while True: - tup = readTuple() - ANCHOR_TUPLE = tup - self.process(tup) - ack(tup) - except Exception, e: - log(traceback.format_exc(e)) - -class Spout: - def initialize(self, conf, context): - pass - - def ack(self, id): - pass - - def fail(self, id): - pass - - def nextTuple(self): - pass - - def run(self): - global MODE - MODE = Spout - conf, context = initComponent() - self.initialize(conf, context) - try: - while True: - msg = readCommand() - if msg["command"] == "next": - self.nextTuple() - if msg["command"] == "ack": - self.ack(msg["id"]) - if msg["command"] == "fail": - self.fail(msg["id"]) - sync() - except Exception, e: - log(traceback.format_exc(e)) diff --git a/examples/storm-starter/multilang/resources/storm.rb b/examples/storm-starter/multilang/resources/storm.rb deleted file mode 100644 index 985b4123ca2..00000000000 --- a/examples/storm-starter/multilang/resources/storm.rb +++ /dev/null @@ -1,200 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-require "rubygems" -require "json" - -module Storm - module Protocol - class << self - attr_accessor :mode, :pending_taskids, :pending_commands - end - - self.pending_taskids = [] - self.pending_commands = [] - - def read_message - msg = "" - loop do - line = STDIN.readline.chomp - break if line == "end" - msg << line - msg << "\n" - end - JSON.parse msg.chomp - end - - def read_task_ids - Storm::Protocol.pending_taskids.shift || - begin - msg = read_message - until msg.is_a? Array - Storm::Protocol.pending_commands.push(msg) - msg = read_message - end - msg - end - end - - def read_command - Storm::Protocol.pending_commands.shift || - begin - msg = read_message - while msg.is_a? Array - Storm::Protocol.pending_taskids.push(msg) - msg = read_message - end - msg - end - end - - def send_msg_to_parent(msg) - puts msg.to_json - puts "end" - STDOUT.flush - end - - def sync - send_msg_to_parent({'command' => 'sync'}) - end - - def send_pid(heartbeat_dir) - pid = Process.pid - send_msg_to_parent({'pid' => pid}) - File.open("#{heartbeat_dir}/#{pid}", "w").close - end - - def emit_bolt(tup, args = {}) - stream = args[:stream] - anchors = args[:anchors] || args[:anchor] || [] - anchors = [anchors] unless anchors.is_a? Enumerable - direct = args[:direct_task] - m = {:command => :emit, :anchors => anchors.map(&:id), :tuple => tup} - m[:stream] = stream if stream - m[:task] = direct if direct - send_msg_to_parent m - read_task_ids unless direct - end - - def emit_spout(tup, args = {}) - stream = args[:stream] - id = args[:id] - direct = args[:direct_task] - m = {:command => :emit, :tuple => tup} - m[:id] = id if id - m[:stream] = stream if stream - m[:task] = direct if direct - send_msg_to_parent m - read_task_ids unless direct - end - - def emit(*args) - case Storm::Protocol.mode - when 'spout' - emit_spout(*args) - when 'bolt' - emit_bolt(*args) - end - end - - def ack(tup) - send_msg_to_parent :command => :ack, :id => tup.id - end - - def fail(tup) - send_msg_to_parent :command => :fail, :id => tup.id - end - - def log(msg) - send_msg_to_parent :command => :log, :msg => msg.to_s - end - - def handshake - setup_info = read_message - send_pid setup_info['pidDir'] - [setup_info['conf'], setup_info['context']] - end - end - - class Tuple - attr_accessor :id, :component, :stream, :task, :values - - def initialize(id, component, stream, task, values) - @id = id - @component = component - @stream = stream - @task = task - @values = values - end - - def self.from_hash(hash) - Tuple.new(*hash.values_at("id", "comp", "stream", "task", "tuple")) - end - end - - class Bolt - include Storm::Protocol - - def prepare(conf, context); end - - def process(tuple); end - - def run - Storm::Protocol.mode = 'bolt' - prepare(*handshake) - begin - while true - process Tuple.from_hash(read_command) - end - rescue Exception => e - log 'Exception in bolt: ' + e.message + ' - ' + e.backtrace.join('\n') - end - end - end - - class Spout - include Storm::Protocol - - def open(conf, context); end - - def nextTuple; end - - def ack(id); end - - def fail(id); end - - def run - Storm::Protocol.mode = 'spout' - open(*handshake) - - begin - while true - msg = read_command - case msg['command'] - when 'next' - nextTuple - when 'ack' - ack(msg['id']) - when 'fail' - fail(msg['id']) - end - sync - end - rescue Exception => e - log 'Exception in spout: ' + e.message + ' - ' + e.backtrace.join('\n') - end - end - end -end diff --git a/examples/storm-starter/pom.xml b/examples/storm-starter/pom.xml index 11146b68063..24fd5575d51 100644 
--- a/examples/storm-starter/pom.xml +++ b/examples/storm-starter/pom.xml @@ -15,152 +15,215 @@ See the License for the specific language governing permissions and limitations under the License. --> - - 4.0.0 - - storm - org.apache.storm - 0.9.2-incubating-SNAPSHOT - ../../pom.xml - + + 4.0.0 + + storm + org.apache.storm + 2.8.4-SNAPSHOT + ../../pom.xml + - org.apache.storm - storm-starter - jar + storm-starter + jar - storm-starter + storm-starter + + UTF-8 + 0.98.4-hadoop2 + - - - junit - junit - test - - - org.testng - testng - 6.8.5 - test - - - org.mockito - mockito-all - test - - - org.easytesting - fest-assert-core - 2.0M8 - test - - - org.jmock - jmock - 2.6.0 - test - - - org.twitter4j - twitter4j-stream - 3.0.3 - - - org.apache.storm - storm-core - ${project.version} - - provided - - - commons-collections - commons-collections - 3.2.1 - - - com.google.guava - guava - - + + + org.hdrhistogram + HdrHistogram + + + org.testng + testng + 7.11.0 + test + + + org.mockito + mockito-core + + + org.hamcrest + hamcrest + + + org.easytesting + fest-assert-core + 2.0M10 + test + + + org.jmock + jmock + 2.13.1 + test + + + org.apache.storm + storm-clojure + ${project.version} + + + org.apache.storm + storm-clojure-test + ${project.version} + test + + + org.apache.storm + storm-client + ${project.version} + + ${provided.scope} + + + org.apache.storm + storm-client + ${project.version} + test-jar + test + + + org.apache.storm + multilang-javascript + ${project.version} + + + org.apache.storm + multilang-ruby + ${project.version} + + + org.apache.storm + multilang-python + ${project.version} + + + com.google.guava + guava + + + org.apache.storm + storm-metrics + ${project.version} + + + org.apache.storm + storm-hdfs + ${project.version} + + + org.apache.storm + storm-redis + ${project.version} + + - - src/jvm - test/jvm - - - ${basedir}/multilang - - + + src/jvm + test/jvm + + + ${basedir}/multilang + + - - - - maven-assembly-plugin - - - jar-with-dependencies - - - - - - - - - - make-assembly - package - - single - - - - - - - com.theoryinpractise - clojure-maven-plugin - true - - - src/clj - - - - - compile - compile - - compile - - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - - exec - - - - - java - true - false - compile - ${storm.topology} - - - - + + + org.apache.maven.plugins + maven-shade-plugin + + true + + + *:* + + META-INF/*.SF + META-INF/*.sf + META-INF/*.DSA + META-INF/*.dsa + META-INF/*.RSA + META-INF/*.rsa + META-INF/*.EC + META-INF/*.ec + META-INF/MSFTSIG.SF + META-INF/MSFTSIG.RSA + + + + + + + package + + shade + + + + + + + + + + + + + com.theoryinpractise + clojure-maven-plugin + true + + + src/clj + + + test/clj + + + + + compile + compile + + compile + + + + test-clojure + test + + test + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + none + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + + org.apache.maven.plugins + maven-pmd-plugin + + + diff --git a/examples/storm-starter/src/clj/org/apache/storm/starter/clj/bolts.clj b/examples/storm-starter/src/clj/org/apache/storm/starter/clj/bolts.clj new file mode 100644 index 00000000000..270952f8c16 --- /dev/null +++ b/examples/storm-starter/src/clj/org/apache/storm/starter/clj/bolts.clj @@ -0,0 +1,79 @@ +;; Licensed to the Apache Software Foundation (ASF) under one +;; or more contributor license agreements. See the NOTICE file +;; distributed with this work for additional information +;; regarding copyright ownership. 
The ASF licenses this file +;; to you under the Apache License, Version 2.0 (the +;; "License"); you may not use this file except in compliance +;; with the License. You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +(ns org.apache.storm.starter.clj.bolts + (:require [org.apache.storm + [clojure :refer :all] + [config :refer :all] + [log :refer :all]]) + (:import [org.apache.storm.starter.tools + NthLastModifiedTimeTracker SlidingWindowCounter + Rankings RankableObjectWithFields] + [org.apache.storm.utils TupleUtils])) + +(defbolt rolling-count-bolt ["obj" "count" "actualWindowLengthInSeconds"] + {:prepare true + :params [window-length emit-frequency] + :conf {TOPOLOGY-TICK-TUPLE-FREQ-SECS emit-frequency}} + [conf context collector] + (let [num-windows (/ window-length emit-frequency) + counter (SlidingWindowCounter. num-windows) + tracker (NthLastModifiedTimeTracker. num-windows)] + (bolt + (execute [{word "word" :as tuple}] + (if (TupleUtils/isTick tuple) + (let [counts (.getCountsThenAdvanceWindow counter) + actual-window-length (.secondsSinceOldestModification tracker)] + (log-debug "Received tick tuple, triggering emit of current window counts") + (.markAsModified tracker) + (doseq [[obj count] counts] + (emit-bolt! collector [obj count actual-window-length]))) + (do + (.incrementCount counter word) + (ack! collector tuple))))))) + +(defmacro update-rankings [tuple collector rankings & body] + `(if (TupleUtils/isTick ~tuple) + (do + (log-debug "Received tick tuple, triggering emit of current rankings") + (emit-bolt! ~collector [(.copy ~rankings)]) + (log-debug "Rankings: " ~rankings)) + ~@body)) + +(defbolt intermediate-rankings-bolt ["rankings"] + {:prepare true + :params [top-n emit-frequency] + :conf {TOPOLOGY-TICK-TUPLE-FREQ-SECS emit-frequency}} + [conf context collector] + (let [rankings (Rankings. top-n)] + (bolt + (execute [tuple] + (update-rankings + tuple collector rankings + (.updateWith rankings (RankableObjectWithFields/from tuple))))))) + +(defbolt total-rankings-bolt ["rankings"] + {:prepare true + :params [top-n emit-frequency] + :conf {TOPOLOGY-TICK-TUPLE-FREQ-SECS emit-frequency}} + [conf context collector] + (let [rankings (Rankings. top-n)] + (bolt + (execute [{rankings-to-merge "rankings" :as tuple}] + (update-rankings + tuple collector rankings + (doto rankings + (.updateWith rankings-to-merge) + (.pruneZeroCounts))))))) diff --git a/examples/storm-starter/src/clj/org/apache/storm/starter/clj/exclamation.clj b/examples/storm-starter/src/clj/org/apache/storm/starter/clj/exclamation.clj new file mode 100644 index 00000000000..415f43f4a06 --- /dev/null +++ b/examples/storm-starter/src/clj/org/apache/storm/starter/clj/exclamation.clj @@ -0,0 +1,45 @@ +;; Licensed to the Apache Software Foundation (ASF) under one +;; or more contributor license agreements. See the NOTICE file +;; distributed with this work for additional information +;; regarding copyright ownership. The ASF licenses this file +;; to you under the Apache License, Version 2.0 (the +;; "License"); you may not use this file except in compliance +;; with the License. 
You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +(ns org.apache.storm.starter.clj.exclamation + (:import [org.apache.storm StormSubmitter] + [org.apache.storm.utils Utils] + [org.apache.storm.testing TestWordSpout]) + (:use [org.apache.storm clojure config]) + (:gen-class)) + +(defbolt exclamation-bolt ["word"] + [{word "word" :as tuple} collector] + (emit-bolt! collector [(str word "!!!")] :anchor tuple) + (ack! collector tuple)) + +(defn mk-topology [] + (topology + {"word" (spout-spec (TestWordSpout.) :p 10)} + {"exclaim1" (bolt-spec {"word" :shuffle} exclamation-bolt :p 3) + "exclaim2" (bolt-spec {"exclaim1" :shuffle} exclamation-bolt :p 2)})) + +(defn submit-topology! [name] + (StormSubmitter/submitTopologyWithProgressBar + name + {TOPOLOGY-DEBUG true + TOPOLOGY-WORKERS 3} + (mk-topology))) + +(defn -main + ([] + (submit-topology! "test")) + ([name] + (submit-topology! name))) diff --git a/examples/storm-starter/src/clj/org/apache/storm/starter/clj/rolling_top_words.clj b/examples/storm-starter/src/clj/org/apache/storm/starter/clj/rolling_top_words.clj new file mode 100644 index 00000000000..9a9677c86bd --- /dev/null +++ b/examples/storm-starter/src/clj/org/apache/storm/starter/clj/rolling_top_words.clj @@ -0,0 +1,52 @@ +;; Licensed to the Apache Software Foundation (ASF) under one +;; or more contributor license agreements. See the NOTICE file +;; distributed with this work for additional information +;; regarding copyright ownership. The ASF licenses this file +;; to you under the Apache License, Version 2.0 (the +;; "License"); you may not use this file except in compliance +;; with the License. You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +(ns org.apache.storm.starter.clj.rolling-top-words + (:require [org.apache.storm [clojure :refer :all] [config :refer :all]] + [org.apache.storm.starter.clj.bolts :refer + [rolling-count-bolt intermediate-rankings-bolt total-rankings-bolt]]) + (:import [org.apache.storm StormSubmitter] + [org.apache.storm.utils Utils] + [org.apache.storm.testing TestWordSpout]) + (:gen-class)) + +(defn mk-topology [] + (let [spout-id "wordGenerator" + counter-id "counter" + ranker-id "intermediateRanker" + total-ranker-id "finalRanker"] + (topology + {spout-id (spout-spec (TestWordSpout.) :p 5)} + {counter-id (bolt-spec {spout-id ["word"]} + (rolling-count-bolt 9 3) + :p 4) + ranker-id (bolt-spec {counter-id ["obj"]} + (intermediate-rankings-bolt 5 2) + :p 4) + total-ranker-id (bolt-spec {ranker-id :global} + (total-rankings-bolt 5 2))}))) + +(defn submit-topology! [name] + (StormSubmitter/submitTopology + name + {TOPOLOGY-DEBUG true + TOPOLOGY-WORKERS 3} + (mk-topology))) + +(defn -main + ([] + (submit-topology! "test")) + ([name] + (submit-topology! 
name))) diff --git a/examples/storm-starter/src/clj/storm/starter/clj/word_count.clj b/examples/storm-starter/src/clj/org/apache/storm/starter/clj/word_count.clj similarity index 89% rename from examples/storm-starter/src/clj/storm/starter/clj/word_count.clj rename to examples/storm-starter/src/clj/org/apache/storm/starter/clj/word_count.clj index 3b54ac81e7f..e3a52f5eb97 100644 --- a/examples/storm-starter/src/clj/storm/starter/clj/word_count.clj +++ b/examples/storm-starter/src/clj/org/apache/storm/starter/clj/word_count.clj @@ -13,9 +13,10 @@ ;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ;; See the License for the specific language governing permissions and ;; limitations under the License. -(ns storm.starter.clj.word-count - (:import [backtype.storm StormSubmitter LocalCluster]) - (:use [backtype.storm clojure config]) +(ns org.apache.storm.starter.clj.word-count + (:import [org.apache.storm StormSubmitter] + [org.apache.storm.utils Utils]) + (:use [org.apache.storm clojure config]) (:gen-class)) (defspout sentence-spout ["sentence"] @@ -73,13 +74,6 @@ word-count :p 6)})) -(defn run-local! [] - (let [cluster (LocalCluster.)] - (.submitTopology cluster "word-count" {TOPOLOGY-DEBUG true} (mk-topology)) - (Thread/sleep 10000) - (.shutdown cluster) - )) - (defn submit-topology! [name] (StormSubmitter/submitTopology name @@ -89,7 +83,7 @@ (defn -main ([] - (run-local!)) + (submit-topology! "test")) ([name] (submit-topology! name))) diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/AnchoredWordCount.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/AnchoredWordCount.java new file mode 100644 index 00000000000..1809cf19fa1 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/AnchoredWordCount.java @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */
+
+package org.apache.storm.starter;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.UUID;
+import org.apache.storm.Config;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.ConfigurableTopology;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.Utils;
+
+public class AnchoredWordCount extends ConfigurableTopology {
+
+    @Override
+    protected int run(String[] args) throws Exception {
+        TopologyBuilder builder = new TopologyBuilder();
+
+        builder.setSpout("spout", new RandomSentenceSpout(), 4);
+
+        builder.setBolt("split", new SplitSentence(), 4).shuffleGrouping("spout");
+        builder.setBolt("count", new WordCount(), 4).fieldsGrouping("split", new Fields("word"));
+
+        Config conf = new Config();
+        conf.setMaxTaskParallelism(3);
+
+        String topologyName = "word-count";
+
+        conf.setNumWorkers(3);
+
+        if (args != null && args.length > 0) {
+            topologyName = args[0];
+        }
+        return submit(topologyName, conf, builder);
+    }
+
+    public static class RandomSentenceSpout extends BaseRichSpout {
+        SpoutOutputCollector collector;
+        Random random;
+
+
+        @Override
+        public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) {
+            this.collector = collector;
+            this.random = new Random();
+        }
+
+        @Override
+        public void nextTuple() {
+            Utils.sleep(10);
+            String[] sentences = new String[]{
+                sentence("the cow jumped over the moon"), sentence("an apple a day keeps the doctor away"),
+                sentence("four score and seven years ago"),
+                sentence("snow white and the seven dwarfs"), sentence("i am at two with nature")
+            };
+            final String sentence = sentences[random.nextInt(sentences.length)];
+
+            this.collector.emit(new Values(sentence), UUID.randomUUID());
+        }
+
+        protected String sentence(String input) {
+            return input;
+        }
+
+        @Override
+        public void ack(Object id) {
+        }
+
+        @Override
+        public void fail(Object id) {
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("word"));
+        }
+    }
+
+    public static class SplitSentence extends BaseBasicBolt {
+        @Override
+        public void execute(Tuple tuple, BasicOutputCollector collector) {
+            String sentence = tuple.getString(0);
+            for (String word : sentence.split("\\s+")) {
+                collector.emit(new Values(word, 1));
+            }
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("word", "count"));
+        }
+    }
+
+    public static class WordCount extends BaseBasicBolt {
+        Map<String, Integer> counts = new HashMap<>();
+
+        @Override
+        public void execute(Tuple tuple, BasicOutputCollector collector) {
+            String word = tuple.getString(0);
+            Integer count = counts.get(word);
+            if (count == null) {
+                count = 0;
+            }
+            count++;
+            counts.put(word, count);
+            collector.emit(new Values(word, count));
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("word", "count"));
+        }
+    }
+}
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/BasicDRPCTopology.java
b/examples/storm-starter/src/jvm/org/apache/storm/starter/BasicDRPCTopology.java new file mode 100644 index 00000000000..70e03a4761e --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/BasicDRPCTopology.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter; + +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.drpc.LinearDRPCTopologyBuilder; +import org.apache.storm.topology.BasicOutputCollector; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseBasicBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.DRPCClient; + +/** + * This topology is a basic example of doing distributed RPC on top of Storm. It implements a function that appends a + * "!" to any string you send the DRPC function. + * + * @see Distributed RPC + */ +@SuppressWarnings("checkstyle:AbbreviationAsWordInName") +public class BasicDRPCTopology { + public static void main(String[] args) throws Exception { + Config conf = new Config(); + String topoName = "DRPCExample"; + String function = "exclamation"; + if (args != null) { + if (args.length > 0) { + topoName = args[0]; + } + if (args.length > 1) { + function = args[1]; + } + } + + LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder(function); + builder.addBolt(new ExclaimBolt(), 3); + + conf.setNumWorkers(3); + StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createRemoteTopology()); + + if (args != null && args.length > 2) { + try (DRPCClient drpc = DRPCClient.getConfiguredClient(conf)) { + for (int i = 2; i < args.length; i++) { + String word = args[i]; + System.out.println("Result for \"" + word + "\": " + drpc.execute(function, word)); + } + } + } + } + + public static class ExclaimBolt extends BaseBasicBolt { + @Override + public void execute(Tuple tuple, BasicOutputCollector collector) { + String input = tuple.getString(1); + collector.emit(new Values(tuple.getValue(0), input + "!")); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("id", "result")); + } + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/BlobStoreAPIWordCountTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/BlobStoreAPIWordCountTopology.java new file mode 100644 index 00000000000..f8059f3c1fc --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/BlobStoreAPIWordCountTopology.java @@ -0,0 +1,300 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.StringTokenizer; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.blobstore.AtomicOutputStream; +import org.apache.storm.blobstore.BlobStoreAclHandler; +import org.apache.storm.blobstore.ClientBlobStore; +import org.apache.storm.generated.AccessControl; +import org.apache.storm.generated.AlreadyAliveException; +import org.apache.storm.generated.AuthorizationException; +import org.apache.storm.generated.InvalidTopologyException; +import org.apache.storm.generated.KeyAlreadyExistsException; +import org.apache.storm.generated.KeyNotFoundException; +import org.apache.storm.generated.SettableBlobMeta; +import org.apache.storm.spout.SpoutOutputCollector; +import org.apache.storm.task.ShellBolt; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.BasicOutputCollector; +import org.apache.storm.topology.IRichBolt; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.topology.base.BaseBasicBolt; +import org.apache.storm.topology.base.BaseRichSpout; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.Utils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings("checkstyle:AbbreviationAsWordInName") +public class BlobStoreAPIWordCountTopology { + private static final Logger LOG = LoggerFactory.getLogger(BlobStoreAPIWordCountTopology.class); + private static ClientBlobStore store; // Client API to invoke blob store API functionality + private static String key = "key"; + private static String fileName = "blacklist.txt"; + + public static void prepare() { + Config conf = new Config(); + conf.putAll(Utils.readStormConfig()); + store = Utils.getClientBlobStore(conf); + } + + // Equivalent create command on command line + // storm blobstore create --file blacklist.txt --acl o::rwa key + private static void createBlobWithContent(String blobKey, ClientBlobStore clientBlobStore, File file) + throws AuthorizationException, KeyAlreadyExistsException, IOException, KeyNotFoundException { + String stringBlobAcl = "o::rwa"; + AccessControl blobAcl = BlobStoreAclHandler.parseAccessControl(stringBlobAcl); + List acls = new LinkedList(); + acls.add(blobAcl); // more ACLs can be added here + SettableBlobMeta settableBlobMeta = new SettableBlobMeta(acls); + 
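// (A note on the ACL above: blobstore ACLs use the form [uo]:[name]:[rwa], so "o::rwa"
+        //  grants read, write, and admin access to "other", i.e. any user. That keeps this
+        //  example simple; a real deployment would likely restrict it.)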
+        AtomicOutputStream blobStream = clientBlobStore.createBlob(blobKey, settableBlobMeta);
+        blobStream.write(readFile(file).toString().getBytes());
+        blobStream.close();
+    }
+
+    // Equivalent update command on command line
+    // storm blobstore update --file blacklist.txt key
+    private static void updateBlobWithContent(String blobKey, ClientBlobStore clientBlobStore, File file)
+        throws KeyNotFoundException, AuthorizationException, IOException {
+        AtomicOutputStream blobOutputStream = clientBlobStore.updateBlob(blobKey);
+        blobOutputStream.write(readFile(file).toString().getBytes());
+        blobOutputStream.close();
+    }
+
+    private static String getRandomSentence() {
+        String[] sentences = new String[]{
+            "the cow jumped over the moon", "an apple a day keeps the doctor away",
+            "four score and seven years ago", "snow white and the seven dwarfs", "i am at two with nature"
+        };
+        String sentence = sentences[new Random().nextInt(sentences.length)];
+        return sentence;
+    }
+
+    private static Set<String> getRandomWordSet() {
+        Set<String> randomWordSet = new HashSet<>();
+        Random random = new Random();
+        String[] words = new String[]{
+            "cow", "jumped", "over", "the", "moon", "apple", "day", "doctor", "away",
+            "four", "seven", "ago", "snow", "white", "seven", "dwarfs", "nature", "two"
+        };
+        // Choosing at most 5 words to update the blacklist file for filtering
+        for (int i = 0; i < 5; i++) {
+            randomWordSet.add(words[random.nextInt(words.length)]);
+        }
+        return randomWordSet;
+    }
+
+    private static Set<String> parseFile(String fileName) throws IOException {
+        File file = new File(fileName);
+        Set<String> wordSet = new HashSet<>();
+        if (!file.exists()) {
+            return wordSet;
+        }
+        StringTokenizer tokens = new StringTokenizer(readFile(file).toString(), "\r\n");
+        while (tokens.hasMoreElements()) {
+            wordSet.add(tokens.nextToken());
+        }
+        LOG.debug("parseFile {}", wordSet);
+        return wordSet;
+    }
+
+    private static StringBuilder readFile(File file) throws IOException {
+        String line;
+        StringBuilder fileContent = new StringBuilder();
+        // Do not use canonical file name here as we are using
+        // symbolic links to read file data and performing atomic move
+        // while updating files
+        try (BufferedReader br = new BufferedReader(new FileReader(file))) {
+            while ((line = br.readLine()) != null) {
+                fileContent.append(line);
+                fileContent.append(System.lineSeparator());
+            }
+        }
+        return fileContent;
+    }
+
+    // Creating a blacklist file to read from the disk
+    public static File createFile(String fileName) throws IOException {
+        File file = new File(fileName);
+        if (!file.exists()) {
+            file.createNewFile();
+        }
+        writeToFile(file, getRandomWordSet());
+        return file;
+    }
+
+    // Updating the blacklist file periodically with random words
+    public static File updateFile(File file) throws IOException {
+        writeToFile(file, getRandomWordSet());
+        return file;
+    }
+
+    // Writing random words to be blacklisted
+    public static void writeToFile(File file, Set<String> content) throws IOException {
+        try (BufferedWriter bw = new BufferedWriter(new FileWriter(file, false))) {
+            Iterator<String> iter = content.iterator();
+            while (iter.hasNext()) {
+                bw.write(iter.next());
+                bw.write(System.lineSeparator());
+            }
+        }
+    }
+
+    public static void main(String[] args) {
+        prepare();
+        BlobStoreAPIWordCountTopology wc = new BlobStoreAPIWordCountTopology();
+        try {
+            File file = createFile(fileName);
+            // Creating the blob before launching the topology
+            createBlobWithContent(key, store, file);
+
+            // Blobstore launch command with topology blobstore map.
+            // Here we are giving it a local name so that we can read from the file:
+            // bin/storm jar examples/storm-starter/storm-starter-topologies-0.11.0-SNAPSHOT.jar
+            // org.apache.storm.starter.BlobStoreAPIWordCountTopology bl -c
+            // topology.blobstore.map='{"key":{"localname":"blacklist.txt", "uncompress":"false"}}'
+            wc.buildAndLaunchWordCountTopology(args);
+
+            // Update the file a few times, once every 5 seconds
+            for (int i = 0; i < 10; i++) {
+                updateBlobWithContent(key, store, updateFile(file));
+                Utils.sleep(5000);
+            }
+        } catch (KeyAlreadyExistsException kae) {
+            LOG.info("Key already exists {}", kae);
+        } catch (AuthorizationException | KeyNotFoundException | IOException exp) {
+            throw new RuntimeException(exp);
+        }
+    }
+
+    public void buildAndLaunchWordCountTopology(String[] args) {
+        TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout("spout", new RandomSentenceSpout(), 5);
+        builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
+        builder.setBolt("filter", new FilterWords(), 6).shuffleGrouping("split");
+
+        Config conf = new Config();
+        conf.setDebug(true);
+        try {
+            conf.setNumWorkers(3);
+            StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
+        } catch (InvalidTopologyException | AuthorizationException | AlreadyAliveException exp) {
+            throw new RuntimeException(exp);
+        }
+    }
+
+    // Spout implementation
+    public static class RandomSentenceSpout extends BaseRichSpout {
+        SpoutOutputCollector collector;
+
+        @Override
+        public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) {
+            this.collector = collector;
+        }
+
+        @Override
+        public void nextTuple() {
+            Utils.sleep(100);
+            collector.emit(new Values(getRandomSentence()));
+        }
+
+        @Override
+        public void ack(Object id) {
+        }
+
+        @Override
+        public void fail(Object id) {
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("sentence"));
+        }
+
+    }
+
+    // Bolt implementation
+    public static class SplitSentence extends ShellBolt implements IRichBolt {
+
+        public SplitSentence() {
+            super("python3", "splitsentence.py");
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("word"));
+        }
+
+        @Override
+        public Map<String, Object> getComponentConfiguration() {
+            return null;
+        }
+    }
+
+    public static class FilterWords extends BaseBasicBolt {
+        boolean poll = false;
+        long pollTime;
+        Set<String> wordSet;
+
+        @Override
+        public void execute(Tuple tuple, BasicOutputCollector collector) {
+            String word = tuple.getString(0);
+            // Poll the blacklist file at most once every 5 seconds to refresh
+            // the wordSet used to filter the words
+            try {
+                if (!poll) {
+                    wordSet = parseFile(fileName);
+                    pollTime = System.currentTimeMillis();
+                    poll = true;
+                } else {
+                    if ((System.currentTimeMillis() - pollTime) > 5000) {
+                        wordSet = parseFile(fileName);
+                        pollTime = System.currentTimeMillis();
+                    }
+                }
+            } catch (IOException exp) {
+                throw new RuntimeException(exp);
+            }
+            if (wordSet != null && !wordSet.contains(word)) {
+                collector.emit(new Values(word));
+            }
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("word"));
+        }
+    }
+}
+
+
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/ExclamationTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/ExclamationTopology.java new file mode 100644 index 00000000000..31854f613f3 --- /dev/null +++
b/examples/storm-starter/src/jvm/org/apache/storm/starter/ExclamationTopology.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter; + +import java.util.Map; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.testing.TestWordSpout; +import org.apache.storm.topology.ConfigurableTopology; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.topology.base.BaseRichBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; + +/** + * This is a basic example of a Storm topology. + */ +public class ExclamationTopology extends ConfigurableTopology { + + public static void main(String[] args) throws Exception { + ConfigurableTopology.start(new ExclamationTopology(), args); + } + + @Override + protected int run(String[] args) { + TopologyBuilder builder = new TopologyBuilder(); + + builder.setSpout("word", new TestWordSpout(), 10); + builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word"); + builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1"); + + conf.setDebug(true); + + String topologyName = "test"; + + conf.setNumWorkers(3); + + if (args != null && args.length > 0) { + topologyName = args[0]; + } + + return submit(topologyName, conf, builder); + } + + public static class ExclamationBolt extends BaseRichBolt { + OutputCollector collector; + + @Override + public void prepare(Map conf, TopologyContext context, OutputCollector collector) { + this.collector = collector; + } + + @Override + public void execute(Tuple tuple) { + collector.emit(tuple, new Values(tuple.getString(0) + "!!!")); + collector.ack(tuple); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("word")); + } + + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/FastWordCountTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/FastWordCountTopology.java new file mode 100644 index 00000000000..e5da703380b --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/FastWordCountTopology.java @@ -0,0 +1,192 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter; + +import java.util.HashMap; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.ThreadLocalRandom; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.generated.ClusterSummary; +import org.apache.storm.generated.ExecutorSummary; +import org.apache.storm.generated.KillOptions; +import org.apache.storm.generated.Nimbus; +import org.apache.storm.generated.SpoutStats; +import org.apache.storm.generated.TopologyInfo; +import org.apache.storm.generated.TopologySummary; +import org.apache.storm.spout.SpoutOutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.BasicOutputCollector; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.topology.base.BaseBasicBolt; +import org.apache.storm.topology.base.BaseRichSpout; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.NimbusClient; +import org.apache.storm.utils.Utils; + +/** + * WordCount but the spout does not stop, and the bolts are implemented in + * java. This can show how fast the word count can run. + */ +public class FastWordCountTopology { + + public static void printMetrics(Nimbus.Iface client, String name) throws Exception { + TopologyInfo info = client.getTopologyInfoByName(name); + int uptime = info.get_uptime_secs(); + long acked = 0; + long failed = 0; + double weightedAvgTotal = 0.0; + for (ExecutorSummary exec : info.get_executors()) { + if ("spout".equals(exec.get_component_id())) { + SpoutStats stats = exec.get_stats().get_specific().get_spout(); + Map failedMap = stats.get_failed().get(":all-time"); + Map ackedMap = stats.get_acked().get(":all-time"); + Map avgLatMap = stats.get_complete_ms_avg().get(":all-time"); + for (String key : ackedMap.keySet()) { + if (failedMap != null) { + Long tmp = failedMap.get(key); + if (tmp != null) { + failed += tmp; + } + } + long ackVal = ackedMap.get(key); + double latVal = avgLatMap.get(key) * ackVal; + acked += ackVal; + weightedAvgTotal += latVal; + } + } + } + double avgLatency = weightedAvgTotal / acked; + System.out.println("uptime: " + uptime + + " acked: " + acked + + " avgLatency: " + avgLatency + + " acked/sec: " + (((double) acked) / uptime + " failed: " + failed)); + } + + public static void kill(Nimbus.Iface client, String name) throws Exception { + KillOptions opts = new KillOptions(); + opts.set_wait_secs(0); + client.killTopologyWithOpts(name, opts); + } + + public static void main(String[] args) throws Exception { + + TopologyBuilder builder = new TopologyBuilder(); + + builder.setSpout("spout", new FastRandomSentenceSpout(), 4); + + builder.setBolt("split", new SplitSentence(), 4).shuffleGrouping("spout"); + builder.setBolt("count", new WordCount(), 4).fieldsGrouping("split", new Fields("word")); + + Config conf = new Config(); + conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class); + + String name = "wc-test"; + 
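// (The LoggingMetricsConsumer registered above writes the built-in metrics to the
+        //  metrics log on each worker; printMetrics() below instead pulls the spout's ack
+        //  and latency totals from Nimbus, so the two views can be compared.)
+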
if (args != null && args.length > 0) { + name = args[0]; + } + + conf.setNumWorkers(1); + StormSubmitter.submitTopologyWithProgressBar(name, conf, builder.createTopology()); + + Map clusterConf = Utils.readStormConfig(); + clusterConf.putAll(Utils.readCommandLineOpts()); + Nimbus.Iface client = NimbusClient.Builder.withConf(clusterConf).build().getClient(); + + //Sleep for 5 mins + for (int i = 0; i < 10; i++) { + Thread.sleep(30 * 1000); + printMetrics(client, name); + } + kill(client, name); + } + + public static class FastRandomSentenceSpout extends BaseRichSpout { + private static final String[] CHOICES = { + "marry had a little lamb whos fleese was white as snow", + "and every where that marry went the lamb was sure to go", + "one two three four five six seven eight nine ten", + "this is a test of the emergency broadcast system this is only a test", + "peter piper picked a peck of pickeled peppers" + }; + SpoutOutputCollector collector; + Random rand; + + @Override + public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) { + this.collector = collector; + rand = ThreadLocalRandom.current(); + } + + @Override + public void nextTuple() { + String sentence = CHOICES[rand.nextInt(CHOICES.length)]; + collector.emit(new Values(sentence), sentence); + } + + @Override + public void ack(Object id) { + //Ignored + } + + @Override + public void fail(Object id) { + collector.emit(new Values(id), id); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("sentence")); + } + } + + public static class SplitSentence extends BaseBasicBolt { + @Override + public void execute(Tuple tuple, BasicOutputCollector collector) { + String sentence = tuple.getString(0); + for (String word : sentence.split("\\s+")) { + collector.emit(new Values(word, 1)); + } + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("word", "count")); + } + } + + public static class WordCount extends BaseBasicBolt { + Map counts = new HashMap(); + + @Override + public void execute(Tuple tuple, BasicOutputCollector collector) { + String word = tuple.getString(0); + Integer count = counts.get(word); + if (count == null) { + count = 0; + } + count++; + counts.put(word, count); + collector.emit(new Values(word, count)); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("word", "count")); + } + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/InOrderDeliveryTest.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/InOrderDeliveryTest.java new file mode 100644 index 00000000000..4820aef2026 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/InOrderDeliveryTest.java @@ -0,0 +1,169 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.starter;
+
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.ClusterSummary;
+import org.apache.storm.generated.ExecutorSummary;
+import org.apache.storm.generated.KillOptions;
+import org.apache.storm.generated.Nimbus;
+import org.apache.storm.generated.SpoutStats;
+import org.apache.storm.generated.TopologyInfo;
+import org.apache.storm.generated.TopologySummary;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.FailedException;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.NimbusClient;
+import org.apache.storm.utils.Utils;
+
+public class InOrderDeliveryTest {
+
+    public static void printMetrics(Nimbus.Iface client, String name) throws Exception {
+        TopologyInfo info = client.getTopologyInfoByName(name);
+        int uptime = info.get_uptime_secs();
+        long acked = 0;
+        long failed = 0;
+        double weightedAvgTotal = 0.0;
+        for (ExecutorSummary exec : info.get_executors()) {
+            if ("spout".equals(exec.get_component_id())) {
+                SpoutStats stats = exec.get_stats().get_specific().get_spout();
+                Map<String, Long> failedMap = stats.get_failed().get(":all-time");
+                Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
+                Map<String, Double> avgLatMap = stats.get_complete_ms_avg().get(":all-time");
+                for (String key : ackedMap.keySet()) {
+                    if (failedMap != null) {
+                        Long tmp = failedMap.get(key);
+                        if (tmp != null) {
+                            failed += tmp;
+                        }
+                    }
+                    long ackVal = ackedMap.get(key);
+                    double latVal = avgLatMap.get(key) * ackVal;
+                    acked += ackVal;
+                    weightedAvgTotal += latVal;
+                }
+            }
+        }
+        double avgLatency = weightedAvgTotal / acked;
+        System.out.println("uptime: " + uptime
+                           + " acked: " + acked
+                           + " avgLatency: " + avgLatency
+                           + " acked/sec: " + (((double) acked) / uptime)
+                           + " failed: " + failed);
+    }
+
+    public static void kill(Nimbus.Iface client, String name) throws Exception {
+        KillOptions opts = new KillOptions();
+        opts.set_wait_secs(0);
+        client.killTopologyWithOpts(name, opts);
+    }
+
+    public static void main(String[] args) throws Exception {
+
+        TopologyBuilder builder = new TopologyBuilder();
+
+        builder.setSpout("spout", new InOrderSpout(), 8);
+        builder.setBolt("count", new Check(), 8).fieldsGrouping("spout", new Fields("c1"));
+
+        Config conf = new Config();
+        conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class);
+
+        String name = "in-order-test";
+        if (args != null && args.length > 0) {
+            name = args[0];
+        }
+
+        conf.setNumWorkers(1);
+        StormSubmitter.submitTopologyWithProgressBar(name, conf, builder.createTopology());
+
+        Map<String, Object> clusterConf = Utils.readStormConfig();
+        clusterConf.putAll(Utils.readCommandLineOpts());
+        Nimbus.Iface client = NimbusClient.Builder.withConf(clusterConf).build().getClient();
+
+        //Print metrics every 30 seconds for 25 minutes, then kill the topology
+        for (int i = 0; i < 50; i++) {
+            Thread.sleep(30 * 1000);
+            printMetrics(client, name);
+        }
+        kill(client, name);
+    }
+
+    public static class InOrderSpout extends BaseRichSpout {
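+        // (Each spout task emits (taskIndex, 0), (taskIndex, 1), ... so each task's
+        //  stream is strictly ordered; the fieldsGrouping on "c1" routes one task's
+        //  stream to one Check instance, which throws FailedException on any gap or
+        //  reordering.)
+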
SpoutOutputCollector collector; + int base = 0; + int count = 0; + + @Override + public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) { + this.collector = collector; + base = context.getThisTaskIndex(); + } + + @Override + public void nextTuple() { + Values v = new Values(base, count); + collector.emit(v, "ACK"); + count++; + } + + @Override + public void ack(Object id) { + //Ignored + } + + @Override + public void fail(Object id) { + //Ignored + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("c1", "c2")); + } + } + + public static class Check extends BaseBasicBolt { + Map expected = new HashMap(); + + @Override + public void execute(Tuple tuple, BasicOutputCollector collector) { + Integer c1 = tuple.getInteger(0); + Integer c2 = tuple.getInteger(1); + Integer exp = expected.get(c1); + if (exp == null) { + exp = 0; + } + if (c2.intValue() != exp.intValue()) { + System.out.println(c1 + " " + c2 + " != " + exp); + throw new FailedException(c1 + " " + c2 + " != " + exp); + } + exp = c2 + 1; + expected.put(c1, exp); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + //Empty + } + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/JoinBoltExample.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/JoinBoltExample.java new file mode 100644 index 00000000000..6fc373970bf --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/JoinBoltExample.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter; + +import java.util.concurrent.TimeUnit; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.bolt.JoinBolt; +import org.apache.storm.starter.bolt.PrinterBolt; +import org.apache.storm.testing.FeederSpout; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.topology.base.BaseWindowedBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.NimbusClient; + +public class JoinBoltExample { + public static void main(String[] args) throws Exception { + if (!NimbusClient.isLocalOverride()) { + throw new IllegalStateException("This example only works in local mode. 
" + + "Run with storm local not storm jar"); + } + FeederSpout genderSpout = new FeederSpout(new Fields("id", "gender")); + FeederSpout ageSpout = new FeederSpout(new Fields("id", "age")); + + TopologyBuilder builder = new TopologyBuilder(); + builder.setSpout("genderSpout", genderSpout); + builder.setSpout("ageSpout", ageSpout); + + // inner join of 'age' and 'gender' records on 'id' field + JoinBolt joiner = new JoinBolt("genderSpout", "id") + .join("ageSpout", "id", "genderSpout") + .select("genderSpout:id,ageSpout:id,gender,age") + .withTumblingWindow(new BaseWindowedBolt.Duration(10, TimeUnit.SECONDS)); + + builder.setBolt("joiner", joiner) + .fieldsGrouping("genderSpout", new Fields("id")) + .fieldsGrouping("ageSpout", new Fields("id")); + + builder.setBolt("printer", new PrinterBolt()).shuffleGrouping("joiner"); + + Config conf = new Config(); + StormSubmitter.submitTopologyWithProgressBar("join-example", conf, builder.createTopology()); + + generateGenderData(genderSpout); + + generateAgeData(ageSpout); + } + + private static void generateAgeData(FeederSpout ageSpout) { + for (int i = 9; i >= 0; i--) { + ageSpout.feed(new Values(i, i + 20)); + } + } + + private static void generateGenderData(FeederSpout genderSpout) { + for (int i = 0; i < 10; i++) { + String gender; + if (i % 2 == 0) { + gender = "male"; + } else { + gender = "female"; + } + genderSpout.feed(new Values(i, gender)); + } + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/LambdaTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/LambdaTopology.java new file mode 100644 index 00000000000..d5dec296896 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/LambdaTopology.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter; + +import java.util.UUID; +import org.apache.storm.Config; +import org.apache.storm.topology.ConfigurableTopology; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.tuple.Values; + +public class LambdaTopology extends ConfigurableTopology { + public static void main(String[] args) { + ConfigurableTopology.start(new LambdaTopology(), args); + } + + @Override + protected int run(String[] args) throws Exception { + TopologyBuilder builder = new TopologyBuilder(); + + // example. spout1: generate random strings + // bolt1: get the first part of a string + // bolt2: output the tuple + + // NOTE: Variable used in lambda expression should be final or effectively final + // (or it will cause compilation error), + // and variable type should implement the Serializable interface if it isn't primitive type + // (or it will cause not serializable exception). 
+ Prefix prefix = new Prefix("Hello lambda:"); + String suffix = ":so cool!"; + int tag = 999; + + builder.setSpout("spout1", () -> UUID.randomUUID().toString()); + builder.setBolt("bolt1", (tuple, collector) -> { + String[] parts = tuple.getStringByField("lambda").split("\\-"); + collector.emit(new Values(prefix + parts[0] + suffix, tag)); + }, "strValue", "intValue").shuffleGrouping("spout1"); + builder.setBolt("bolt2", tuple -> System.out.println(tuple)).shuffleGrouping("bolt1"); + + Config conf = new Config(); + conf.setDebug(true); + conf.setNumWorkers(2); + + return submit("lambda-demo", conf, builder); + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/ManualDRPC.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/ManualDRPC.java new file mode 100644 index 00000000000..e685ca16c2e --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/ManualDRPC.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter; + +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.drpc.DRPCSpout; +import org.apache.storm.drpc.ReturnResults; +import org.apache.storm.topology.BasicOutputCollector; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.topology.base.BaseBasicBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.DRPCClient; + +@SuppressWarnings("checkstyle:AbbreviationAsWordInName") +public class ManualDRPC { + + public static void main(String[] args) throws Exception { + TopologyBuilder builder = new TopologyBuilder(); + + DRPCSpout spout = new DRPCSpout("exclamation"); + builder.setSpout("drpc", spout); + builder.setBolt("exclaim", new ExclamationBolt(), 3).shuffleGrouping("drpc"); + builder.setBolt("return", new ReturnResults(), 3).shuffleGrouping("exclaim"); + + Config conf = new Config(); + StormSubmitter.submitTopology("exclaim", conf, builder.createTopology()); + try (DRPCClient drpc = DRPCClient.getConfiguredClient(conf)) { + System.out.println(drpc.execute("exclamation", "aaa")); + System.out.println(drpc.execute("exclamation", "bbb")); + } + } + + public static class ExclamationBolt extends BaseBasicBolt { + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("result", "return-info")); + } + + @Override + public void execute(Tuple tuple, BasicOutputCollector collector) { + String arg = tuple.getString(0); + Object retInfo = tuple.getValue(1); + collector.emit(new Values(arg + "!!!", retInfo)); + } + + } +} diff --git 
a/examples/storm-starter/src/jvm/org/apache/storm/starter/MultiThreadWordCountTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/MultiThreadWordCountTopology.java new file mode 100644 index 00000000000..2d398550540 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/MultiThreadWordCountTopology.java @@ -0,0 +1,107 @@ +/* + Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + and limitations under the License. + */ + +package org.apache.storm.starter; + +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import org.apache.storm.Config; +import org.apache.storm.metric.LoggingMetricsConsumer; +import org.apache.storm.starter.bolt.WordCountBolt; +import org.apache.storm.starter.spout.RandomSentenceSpout; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.ConfigurableTopology; +import org.apache.storm.topology.IRichBolt; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; + +/** + * Some topologies might spawn some threads within bolts to do some work and emit tuples from those threads. + * This is a simple wordcount topology example that mimics those use cases and might help us catch possible race conditions. 
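+ * (The split bolt below hands each word to a thread pool and emits from those threads, while the
+ * one-executor-per-worker setting in run() forces inter-worker transfer, where serialization races would surface.)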
+ */
+public class MultiThreadWordCountTopology extends ConfigurableTopology {
+    public static void main(String[] args) {
+        ConfigurableTopology.start(new MultiThreadWordCountTopology(), args);
+    }
+
+    @Override
+    protected int run(String[] args) {
+
+        TopologyBuilder builder = new TopologyBuilder();
+
+        builder.setSpout("spout", new RandomSentenceSpout(), 1);
+        builder.setBolt("split", new MultiThreadedSplitSentence(), 1).shuffleGrouping("spout");
+        builder.setBolt("count", new WordCountBolt(), 1).fieldsGrouping("split", new Fields("word"));
+
+        //This makes sure there is only one executor per worker, which is easier to debug;
+        //problems involving serialization/deserialization will only happen in inter-worker data transfer
+        conf.put(Config.TOPOLOGY_RAS_ONE_EXECUTOR_PER_WORKER, true);
+        //this involves metricsTick
+        conf.registerMetricsConsumer(LoggingMetricsConsumer.class);
+
+        conf.setTopologyWorkerMaxHeapSize(128);
+
+        String topologyName = "multithreaded-word-count";
+
+        if (args != null && args.length > 0) {
+            topologyName = args[0];
+        }
+        return submit(topologyName, conf, builder);
+    }
+
+    public static class MultiThreadedSplitSentence implements IRichBolt {
+
+        private OutputCollector collector;
+        private ExecutorService executor;
+
+        @Override
+        public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
+            this.collector = collector;
+            executor = Executors.newFixedThreadPool(6);
+            //This makes sure metricsTick is called every second;
+            //it makes any race condition between metricsTick and the output collector easier to hit
+            context.registerMetric("dummy-counter", () -> 0, 1);
+        }
+
+        @Override
+        public void execute(Tuple input) {
+            String str = input.getString(0);
+            String[] splits = str.split("\\s+");
+            for (String s : splits) {
+                //spawn other threads to do the work and emit
+                Runnable runnableTask = () -> {
+                    collector.emit(new Values(s));
+                };
+                executor.submit(runnableTask);
+            }
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("word"));
+        }
+
+        @Override
+        public Map<String, Object> getComponentConfiguration() {
+            return null;
+        }
+
+        @Override
+        public void cleanup() {
+        }
+    }
+} \ No newline at end of file
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/MultipleLoggerTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/MultipleLoggerTopology.java new file mode 100644 index 00000000000..9410931553f --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/MultipleLoggerTopology.java @@ -0,0 +1,90 @@ +/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */ + +package org.apache.storm.starter; + +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.testing.TestWordSpout; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.topology.base.BaseRichBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This is a basic example of a Storm topology. + */ +public class MultipleLoggerTopology { + public static void main(String[] args) throws Exception { + TopologyBuilder builder = new TopologyBuilder(); + + builder.setSpout("word", new TestWordSpout(), 10); + builder.setBolt("exclaim1", new ExclamationLoggingBolt(), 3).shuffleGrouping("word"); + builder.setBolt("exclaim2", new ExclamationLoggingBolt(), 2).shuffleGrouping("exclaim1"); + + Config conf = new Config(); + conf.setDebug(true); + String topoName = MultipleLoggerTopology.class.getName(); + if (args != null && args.length > 0) { + topoName = args[0]; + } + conf.setNumWorkers(2); + StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology()); + } + + public static class ExclamationLoggingBolt extends BaseRichBolt { + OutputCollector collector; + Logger rootLogger = LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME); + // ensure the loggers are configured in the worker.xml before + // trying to use them here + Logger logger = LoggerFactory.getLogger("com.myapp"); + Logger subLogger = LoggerFactory.getLogger("com.myapp.sub"); + + @Override + public void prepare(Map conf, TopologyContext context, OutputCollector collector) { + this.collector = collector; + } + + @Override + public void execute(Tuple tuple) { + rootLogger.debug("root: This is a DEBUG message"); + rootLogger.info("root: This is an INFO message"); + rootLogger.warn("root: This is a WARN message"); + rootLogger.error("root: This is an ERROR message"); + + logger.debug("myapp: This is a DEBUG message"); + logger.info("myapp: This is an INFO message"); + logger.warn("myapp: This is a WARN message"); + logger.error("myapp: This is an ERROR message"); + + subLogger.debug("myapp.sub: This is a DEBUG message"); + subLogger.info("myapp.sub: This is an INFO message"); + subLogger.warn("myapp.sub: This is a WARN message"); + subLogger.error("myapp.sub: This is an ERROR message"); + + collector.emit(tuple, new Values(tuple.getString(0) + "!!!")); + collector.ack(tuple); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("word")); + } + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/PersistentWindowingTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/PersistentWindowingTopology.java new file mode 100644 index 00000000000..62b3ed577ec --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/PersistentWindowingTopology.java @@ -0,0 +1,173 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.starter; + +import static org.apache.storm.topology.base.BaseWindowedBolt.Duration; + +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.starter.spout.RandomIntegerSpout; +import org.apache.storm.state.KeyValueState; +import org.apache.storm.streams.Pair; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.topology.base.BaseStatefulWindowedBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.apache.storm.windowing.TupleWindow; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * An example that demonstrates the usage of {@link org.apache.storm.topology.IStatefulWindowedBolt} with window persistence. + *
<p>
+ * The framework automatically checkpoints the tuples in the window along with the bolt's state and restores the same during restarts.
+ * </p>
+ *
+ * <p>
+ * This topology uses 'redis' for state persistence, so you should also start a redis instance before deploying. If you are running in local
+ * mode you can just start a redis server locally which will be used for storing the state. The default RedisKeyValueStateProvider
+ * parameters can be overridden by setting {@link Config#TOPOLOGY_STATE_PROVIDER_CONFIG}, for e.g.
+ * <pre>
+ * {
+ *   "jedisPoolConfig": {
+ *     "host": "redis-server-host",
+ *     "port": 6379,
+ *     "timeout": 2000,
+ *     "database": 0,
+ *     "password": "xyz"
+ *   }
+ * }
+ * </pre>
+ * </p>
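+ * <p>With the 10-second window sliding every 2 seconds configured below, the printer bolt writes one
+ * {@code Averages{global=..., window=...}} line per window activation.</p>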
+ */ +public class PersistentWindowingTopology { + private static final Logger LOG = LoggerFactory.getLogger(PersistentWindowingTopology.class); + + /** + * Create and deploy the topology. + * + * @param args args + * @throws Exception exception + */ + public static void main(String[] args) throws Exception { + TopologyBuilder builder = new TopologyBuilder(); + + // generate random numbers + builder.setSpout("spout", new RandomIntegerSpout()); + + // emits sliding window and global averages + builder.setBolt("avgbolt", new AvgBolt() + .withWindow(new Duration(10, TimeUnit.SECONDS), new Duration(2, TimeUnit.SECONDS)) + // persist the window in state + .withPersistence() + // max number of events to be cached in memory + .withMaxEventsInMemory(25000), 1) + .shuffleGrouping("spout"); + + // print the values to stdout + builder.setBolt("printer", (x, y) -> System.out.println(x.getValue(0)), 1).shuffleGrouping("avgbolt"); + + Config conf = new Config(); + conf.setDebug(false); + + // checkpoint the state every 5 seconds + conf.put(Config.TOPOLOGY_STATE_CHECKPOINT_INTERVAL, 5000); + + // use redis for state persistence + conf.put(Config.TOPOLOGY_STATE_PROVIDER, "org.apache.storm.redis.state.RedisKeyValueStateProvider"); + + String topoName = "test"; + if (args != null && args.length > 0) { + topoName = args[0]; + } + conf.setNumWorkers(1); + StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology()); + } + + // wrapper to hold global and window averages + private static class Averages { + private final double global; + private final double window; + + Averages(double global, double window) { + this.global = global; + this.window = window; + } + + @Override + public String toString() { + return "Averages{" + "global=" + String.format("%.2f", global) + ", window=" + String.format("%.2f", window) + '}'; + } + } + + /** + * A bolt that uses stateful persistence to store the windows along with the state (global avg). 
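+ * It keeps one running (sum, count) pair under a single state key, so the global average survives restarts
+ * while the window contents themselves are restored by the framework.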
+ */
+    private static class AvgBolt extends BaseStatefulWindowedBolt<KeyValueState<String, Pair<Long, Long>>> {
+        private static final String STATE_KEY = "avg";
+
+        private OutputCollector collector;
+        private KeyValueState<String, Pair<Long, Long>> state;
+        private Pair<Long, Long> globalAvg;
+
+        @Override
+        public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
+            this.collector = collector;
+        }
+
+        @Override
+        public void initState(KeyValueState<String, Pair<Long, Long>> state) {
+            this.state = state;
+            globalAvg = state.get(STATE_KEY, Pair.of(0L, 0L));
+            LOG.info("initState with global avg [" + (double) globalAvg.getFirst() / globalAvg.getSecond() + "]");
+        }
+
+        @Override
+        public void execute(TupleWindow window) {
+            int sum = 0;
+            int count = 0;
+            // iterate over tuples in the current window
+            Iterator<Tuple> it = window.getIter();
+            while (it.hasNext()) {
+                Tuple tuple = it.next();
+                sum += tuple.getInteger(0);
+                ++count;
+            }
+            LOG.debug("Count : {}", count);
+            globalAvg = Pair.of(globalAvg.getFirst() + sum, globalAvg.getSecond() + count);
+            // update the value in state
+            state.put(STATE_KEY, globalAvg);
+            // emit the averages downstream
+            collector.emit(new Values(new Averages((double) globalAvg.getFirst() / globalAvg.getSecond(), (double) sum / count)));
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("avg"));
+        }
+    }
+
+}
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/Prefix.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/Prefix.java new file mode 100644 index 00000000000..31f200a83dc --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/Prefix.java @@ -0,0 +1,28 @@ +/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.starter;
+
+import java.io.Serializable;
+
+class Prefix implements Serializable {
+    private String str;
+
+    Prefix(String str) {
+        this.str = str;
+    }
+
+    @Override
+    public String toString() {
+        return this.str;
+    }
+}
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/ReachTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/ReachTopology.java new file mode 100644 index 00000000000..59051326942 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/ReachTopology.java @@ -0,0 +1,191 @@ +/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.coordination.BatchOutputCollector; +import org.apache.storm.drpc.LinearDRPCTopologyBuilder; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.BasicOutputCollector; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseBasicBolt; +import org.apache.storm.topology.base.BaseBatchBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.DRPCClient; + +/** + * This is a good example of doing complex Distributed RPC on top of Storm. This program creates a topology that can + * compute the reach for any URL on Twitter in realtime by parallelizing the whole computation. + * + *
<p>
+ * Reach is the number of unique people exposed to a URL on Twitter. To compute reach, you have to get all the people
+ * who tweeted the URL, get all the followers of all those people, unique that set of followers, and then count the
+ * unique set. It's an intense computation that can involve thousands of database calls and tens of millions of follower
+ * records.
+ * </p>
+ *
+ * <p>
+ * This Storm topology does every piece of that computation in parallel, turning what would be a computation that takes
+ * minutes on a single machine into one that takes just a couple of seconds.
+ * </p>
+ *
+ * <p>
+ * For the purposes of demonstration, this topology replaces the use of actual DBs with in-memory hashmaps.
+ * </p>
+ *
+ * @see Distributed RPC
+ */
+public class ReachTopology {
+    public static Map<String, List<String>> TWEETERS_DB = new HashMap<String, List<String>>() {
+        {
+            put("foo.com/blog/1", Arrays.asList("sally", "bob", "tim", "george", "nathan"));
+            put("engineering.twitter.com/blog/5", Arrays.asList("adam", "david", "sally", "nathan"));
+            put("tech.backtype.com/blog/123", Arrays.asList("tim", "mike", "john"));
+        }
+    };
+
+    public static Map<String, List<String>> FOLLOWERS_DB = new HashMap<String, List<String>>() {
+        {
+            put("sally", Arrays.asList("bob", "tim", "alice", "adam", "jim", "chris", "jai"));
+            put("bob", Arrays.asList("sally", "nathan", "jim", "mary", "david", "vivian"));
+            put("tim", Arrays.asList("alex"));
+            put("nathan", Arrays.asList("sally", "bob", "adam", "harry", "chris", "vivian", "emily", "jordan"));
+            put("adam", Arrays.asList("david", "carissa"));
+            put("mike", Arrays.asList("john", "bob"));
+            put("john", Arrays.asList("alice", "nathan", "jim", "mike", "bob"));
+        }
+    };
+
+    public static LinearDRPCTopologyBuilder construct() {
+        LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("reach");
+        builder.addBolt(new GetTweeters(), 4);
+        builder.addBolt(new GetFollowers(), 12).shuffleGrouping();
+        builder.addBolt(new PartialUniquer(), 6).fieldsGrouping(new Fields("id", "follower"));
+        builder.addBolt(new CountAggregator(), 3).fieldsGrouping(new Fields("id"));
+        return builder;
+    }
+
+    public static void main(String[] args) throws Exception {
+        LinearDRPCTopologyBuilder builder = construct();
+
+        Config conf = new Config();
+        conf.setNumWorkers(6);
+        String topoName = "reach-drpc";
+        if (args.length > 0) {
+            topoName = args[0];
+        }
+        StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createRemoteTopology());
+
+        try (DRPCClient drpc = DRPCClient.getConfiguredClient(conf)) {
+            String[] urlsToTry = new String[]{ "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" };
+            for (String url : urlsToTry) {
+                System.out.println("Reach of " + url + ": " + drpc.execute("reach", url));
+            }
+        }
+    }
+
+    public static class GetTweeters extends BaseBasicBolt {
+        @Override
+        public void execute(Tuple tuple, BasicOutputCollector collector) {
+            Object id = tuple.getValue(0);
+            String url = tuple.getString(1);
+            List<String> tweeters = TWEETERS_DB.get(url);
+            if (tweeters != null) {
+                for (String tweeter : tweeters) {
+                    collector.emit(new Values(id, tweeter));
+                }
+            }
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("id", "tweeter"));
+        }
+    }
+
+    public static class GetFollowers extends BaseBasicBolt {
+        @Override
+        public void execute(Tuple tuple, BasicOutputCollector collector) {
+            Object id = tuple.getValue(0);
+            String tweeter = tuple.getString(1);
+            List<String> followers = FOLLOWERS_DB.get(tweeter);
+            if (followers != null) {
+                for (String follower : followers) {
+                    collector.emit(new Values(id, follower));
+                }
+            }
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("id", "follower"));
+        }
+    }
+
+    public static class PartialUniquer extends BaseBatchBolt<Object> {
+        BatchOutputCollector collector;
+        Object id;
+        Set<String> followers = new HashSet<String>();
+
+        @Override
+        public void prepare(Map<String, Object> conf, TopologyContext context, BatchOutputCollector collector, Object id) {
+            this.collector = collector;
+            this.id = id;
+        }
+
+        @Override
+        public void execute(Tuple tuple) {
+            followers.add(tuple.getString(1));
+        }
+
+        @Override
+        public void
finishBatch() { + collector.emit(new Values(id, followers.size())); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("id", "partial-count")); + } + } + + public static class CountAggregator extends BaseBatchBolt { + BatchOutputCollector collector; + Object id; + int count = 0; + + @Override + public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) { + this.collector = collector; + this.id = id; + } + + @Override + public void execute(Tuple tuple) { + count += tuple.getInteger(1); + } + + @Override + public void finishBatch() { + collector.emit(new Values(id, count)); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("id", "reach")); + } + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/ResourceAwareExampleTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/ResourceAwareExampleTopology.java new file mode 100644 index 00000000000..20f951e7602 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/ResourceAwareExampleTopology.java @@ -0,0 +1,150 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter; + +import java.util.Iterator; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.ConcurrentHashMap; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.testing.TestWordSpout; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.SharedOffHeapWithinNode; +import org.apache.storm.topology.SharedOnHeap; +import org.apache.storm.topology.SpoutDeclarer; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.topology.base.BaseRichBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; + +public class ResourceAwareExampleTopology { + public static void main(String[] args) throws Exception { + TopologyBuilder builder = new TopologyBuilder(); + + //A topology can set resources in terms of CPU and Memory for each component + // These can be chained (like with setting the CPU requirement) + SpoutDeclarer spout = builder.setSpout("word", new TestWordSpout(), 10).setCPULoad(20); + // Or done separately like with setting the + // onheap and offheap memory requirement + spout.setMemoryLoad(64, 16); + //On heap memory is used to help calculate the heap of the java process for the worker + // off heap memory is for things like JNI memory allocated off heap, or when using the + // ShellBolt or ShellSpout. 
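+        // (Illustrative arithmetic, not an API call: with the parallelism of 10 above and 64 MB of on-heap
+        //  memory per executor, this spout alone accounts for roughly 640 MB of heap spread across however
+        //  many workers its executors are packed into.)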
In this case the 16 MB of off-heap is just an example, as we are not using it.
+
+        // Sometimes a Bolt or Spout will have some memory that is shared between the instances.
+        // These are typically caches, but could be anything like a static database that is
+        // memory-mapped into the process. These can be declared separately and added to the bolts and
+        // spouts that use them. Or, if only one component uses it, they can be created inline with the add.
+        SharedOnHeap exclaimCache = new SharedOnHeap(100, "exclaim-cache");
+        SharedOffHeapWithinNode notImplementedButJustAnExample =
+            new SharedOffHeapWithinNode(500, "not-implemented-node-level-cache");
+
+        //If CPU or memory is not set, the values stored in topology.component.resources.onheap.memory.mb,
+        // topology.component.resources.offheap.memory.mb and topology.component.cpu.pcore.percent
+        // will be used instead
+        builder
+            .setBolt("exclaim1", new ExclamationBolt(), 3)
+            .shuffleGrouping("word")
+            .addSharedMemory(exclaimCache);
+
+        builder
+            .setBolt("exclaim2", new ExclamationBolt(), 2)
+            .shuffleGrouping("exclaim1")
+            .setMemoryLoad(100)
+            .addSharedMemory(exclaimCache)
+            .addSharedMemory(notImplementedButJustAnExample);
+
+        Config conf = new Config();
+        conf.setDebug(true);
+
+        //Under RAS the number of workers is determined by the scheduler, and the settings in the conf are ignored
+        //conf.setNumWorkers(3);
+
+        //Instead the scheduler lets you set the maximum heap size for any worker.
+        conf.setTopologyWorkerMaxHeapSize(1024.0);
+        //The scheduler generally will try to pack executors into workers until the max heap size is met, but
+        // this can vary depending on the specific scheduling strategy selected.
+        // The reason for this is to try and balance the maximum pause time GC might take (which is larger for larger heaps)
+        // against better performance because of not needing to serialize/deserialize tuples.
+
+        //The priority of a topology: 0 is the highest priority, and importance decreases as the number increases.
+        //Recommended range is 0-29, but no hard limit is set.
+        // If there are not enough resources in a cluster, the priority, in combination with how far over its
+        // guarantees a user is, decides which topologies are run and which ones are not.
+        conf.setTopologyPriority(29);
+
+        //set to use the default resource aware strategy when using the MultitenantResourceAwareBridgeScheduler
+        conf.setTopologyStrategy(
+            "org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy");
+
+        String topoName = "test";
+        if (args != null && args.length > 0) {
+            topoName = args[0];
+        }
+
+        StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
+    }
+
+    public static class ExclamationBolt extends BaseRichBolt {
+        //Have a crummy cache to show off shared memory accounting
+        private static final ConcurrentHashMap<String, String> myCrummyCache =
+            new ConcurrentHashMap<>();
+        private static final int CACHE_SIZE = 100_000;
+        OutputCollector collector;
+
+        protected static String getFromCache(String key) {
+            return myCrummyCache.get(key);
+        }
+
+        protected static void addToCache(String key, String value) {
+            myCrummyCache.putIfAbsent(key, value);
+            int numToRemove = myCrummyCache.size() - CACHE_SIZE;
+            if (numToRemove > 0) {
+                //Remove something randomly...
+                Iterator<Entry<String, String>> it = myCrummyCache.entrySet().iterator();
+                for (; numToRemove > 0 && it.hasNext(); numToRemove--) {
+                    it.next();
+                    it.remove();
+                }
+            }
+        }
+
+        @Override
+        public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+            this.collector = collector;
+        }
+
+        @Override
+        public void execute(Tuple tuple) {
+            String orig = tuple.getString(0);
+            String ret = getFromCache(orig);
+            if (ret == null) {
+                ret = orig + "!!!";
+                addToCache(orig, ret);
+            }
+            collector.emit(tuple, new Values(ret));
+            collector.ack(tuple);
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("word"));
+        }
+    }
+}
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/RollingTopWords.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/RollingTopWords.java
new file mode 100644
index 00000000000..ea40e8bfaad
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/RollingTopWords.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.starter;
+
+import org.apache.storm.starter.bolt.IntermediateRankingsBolt;
+import org.apache.storm.starter.bolt.RollingCountBolt;
+import org.apache.storm.starter.bolt.TotalRankingsBolt;
+import org.apache.storm.testing.TestWordSpout;
+import org.apache.storm.topology.ConfigurableTopology;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This topology does a continuous computation of the top N words that the
+ * topology has seen in terms of cardinality. The top N computation is done in a
+ * completely scalable way, and a similar approach could be used to compute
+ * things like trending topics or trending images on Twitter.
+ */
+public class RollingTopWords extends ConfigurableTopology {
+
+    private static final Logger LOG = LoggerFactory.getLogger(RollingTopWords.class);
+    private static final int TOP_N = 5;
+
+    private RollingTopWords() {
+    }
+
+    public static void main(String[] args) throws Exception {
+        ConfigurableTopology.start(new RollingTopWords(), args);
+    }
+
+    /**
+     * Submits (runs) the topology.
+     *
+     *

Usage: "RollingTopWords [topology-name] [-local]" + * + *

By default, the topology is run locally under the name + * "slidingWindowCounts". + * + *

Examples:
+     * ```
+     * # Runs in remote/cluster mode, with topology name "production-topology"
+     * $ storm jar storm-starter-jar-with-dependencies.jar org.apache.storm.starter.RollingTopWords production-topology
+     * ```
+     *
+     * @param args
+     *            First positional argument (optional) is topology name, second
+     *            positional argument (optional) defines whether to run the topology
+     *            locally ("-local") or remotely, i.e. on a real cluster.
+     */
+    @Override
+    protected int run(String[] args) {
+        String topologyName = "slidingWindowCounts";
+        if (args.length >= 1) {
+            topologyName = args[0];
+        }
+        TopologyBuilder builder = new TopologyBuilder();
+        String spoutId = "wordGenerator";
+        String counterId = "counter";
+        String intermediateRankerId = "intermediateRanker";
+        builder.setSpout(spoutId, new TestWordSpout(), 5);
+        builder.setBolt(counterId, new RollingCountBolt(9, 3), 4).fieldsGrouping(spoutId, new Fields("word"));
+        builder.setBolt(intermediateRankerId, new IntermediateRankingsBolt(TOP_N), 4).fieldsGrouping(counterId,
+            new Fields("obj"));
+        String totalRankerId = "finalRanker";
+        builder.setBolt(totalRankerId, new TotalRankingsBolt(TOP_N)).globalGrouping(intermediateRankerId);
+        LOG.info("Topology name: " + topologyName);
+
+        return submit(topologyName, conf, builder);
+    }
+}
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/SingleJoinExample.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/SingleJoinExample.java
new file mode 100644
index 00000000000..ad3ab3d48e2
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/SingleJoinExample.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.starter.bolt.SingleJoinBolt;
+import org.apache.storm.testing.FeederSpout;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.NimbusClient;
+
+/**
+ * Example of using a simple custom join bolt.
+ * NOTE: Prefer to use the built-in JoinBolt wherever applicable
+ */
+public class SingleJoinExample {
+    public static void main(String[] args) throws Exception {
+        if (!NimbusClient.isLocalOverride()) {
+            throw new IllegalStateException("This example only works in local mode. "
+                + "Run with storm local not storm jar");
+        }
+        FeederSpout genderSpout = new FeederSpout(new Fields("id", "gender"));
+        FeederSpout ageSpout = new FeederSpout(new Fields("id", "age"));
+
+        TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout("gender", genderSpout);
+        builder.setSpout("age", ageSpout);
+        builder.setBolt("join", new SingleJoinBolt(new Fields("gender", "age"))).fieldsGrouping("gender", new Fields("id"))
+            .fieldsGrouping("age", new Fields("id"));
+
+        Config conf = new Config();
+        conf.setDebug(true);
+        StormSubmitter.submitTopology("join-example", conf, builder.createTopology());
+
+        for (int i = 0; i < 10; i++) {
+            String gender;
+            if (i % 2 == 0) {
+                gender = "male";
+            } else {
+                gender = "female";
+            }
+            genderSpout.feed(new Values(i, gender));
+        }
+
+        for (int i = 9; i >= 0; i--) {
+            ageSpout.feed(new Values(i, i + 20));
+        }
+    }
+}
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/SkewedRollingTopWords.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/SkewedRollingTopWords.java
new file mode 100644
index 00000000000..a983b7c3d24
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/SkewedRollingTopWords.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.starter;
+
+import org.apache.storm.starter.bolt.IntermediateRankingsBolt;
+import org.apache.storm.starter.bolt.RollingCountAggBolt;
+import org.apache.storm.starter.bolt.RollingCountBolt;
+import org.apache.storm.starter.bolt.TotalRankingsBolt;
+import org.apache.storm.testing.TestWordSpout;
+import org.apache.storm.topology.ConfigurableTopology;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This topology does a continuous computation of the top N words that the
+ * topology has seen in terms of cardinality. The top N computation is done in a
+ * completely scalable way, and a similar approach could be used to compute
+ * things like trending topics or trending images on Twitter. It takes an
+ * approach that assumes that some words will be much more common than other
+ * words, and uses partialKeyGrouping to better balance the skewed load.
+ */
+public class SkewedRollingTopWords extends ConfigurableTopology {
+
+    private static final Logger LOG = LoggerFactory.getLogger(SkewedRollingTopWords.class);
+    private static final int TOP_N = 5;
+
+    private SkewedRollingTopWords() {
+    }
+
+    public static void main(String[] args) throws Exception {
+        ConfigurableTopology.start(new SkewedRollingTopWords(), args);
+    }
+
+    /**
+     * Submits (runs) the topology.
+     *
+     *

Usage: "SkewedRollingTopWords [topology-name] [-local]" + * + *
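+     * <p>Implementation note (a sketch of why the aggregator exists, based on the wiring in run() below):
+     * partialKeyGrouping splits each word key across a pair of RollingCountBolt instances, so the
+     * RollingCountAggBolt is needed downstream to re-combine the partial counts per key before ranking.
+     *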

By default, the topology is run locally under the name + * "slidingWindowCounts". + * + *

Examples: + * + *

```
+     * # Runs in remote/cluster mode, with topology name "production-topology"
+     * $ storm jar storm-starter-jar-with-dependencies.jar org.apache.storm.starter.SkewedRollingTopWords production-topology
+     * ```
+     *
+     * @param args
+     *            First positional argument (optional) is topology name, second
+     *            positional argument (optional) defines whether to run the topology
+     *            locally ("-local") or remotely, i.e. on a real cluster
+     */
+    @Override
+    protected int run(String[] args) {
+        String topologyName = "slidingWindowCounts";
+        if (args.length >= 1) {
+            topologyName = args[0];
+        }
+        TopologyBuilder builder = new TopologyBuilder();
+        String spoutId = "wordGenerator";
+        String counterId = "counter";
+        String aggId = "aggregator";
+        builder.setSpout(spoutId, new TestWordSpout(), 5);
+        builder.setBolt(counterId, new RollingCountBolt(9, 3), 4).partialKeyGrouping(spoutId, new Fields("word"));
+        builder.setBolt(aggId, new RollingCountAggBolt(), 4).fieldsGrouping(counterId, new Fields("obj"));
+        String intermediateRankerId = "intermediateRanker";
+        builder.setBolt(intermediateRankerId, new IntermediateRankingsBolt(TOP_N), 4).fieldsGrouping(aggId, new Fields("obj"));
+        String totalRankerId = "finalRanker";
+        builder.setBolt(totalRankerId, new TotalRankingsBolt(TOP_N)).globalGrouping(intermediateRankerId);
+        LOG.info("Topology name: " + topologyName);
+
+        return submit(topologyName, conf, builder);
+    }
+}
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/SlidingTupleTsTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/SlidingTupleTsTopology.java
new file mode 100644
index 00000000000..d179899d69b
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/SlidingTupleTsTopology.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.starter;
+
+import java.util.concurrent.TimeUnit;
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.starter.bolt.PrinterBolt;
+import org.apache.storm.starter.bolt.SlidingWindowSumBolt;
+import org.apache.storm.starter.spout.RandomIntegerSpout;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseWindowedBolt;
+import org.apache.storm.topology.base.BaseWindowedBolt.Duration;
+
+/**
+ * Windowing based on tuple timestamp (e.g. the time when the tuple is generated
+ * rather than when it is processed).
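+ * <p>A note on the lag used below (an informal gloss, not taken from the official javadoc): with
+ * withLag(new Duration(5, TimeUnit.SECONDS)) the window calculation tolerates up to 5 seconds of
+ * out-of-order arrival relative to the "ts" field, so late tuples can still land in the correct window.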
+ */ +public class SlidingTupleTsTopology { + public static void main(String[] args) throws Exception { + TopologyBuilder builder = new TopologyBuilder(); + BaseWindowedBolt bolt = new SlidingWindowSumBolt() + .withWindow(new Duration(5, TimeUnit.SECONDS), new Duration(3, TimeUnit.SECONDS)) + .withTimestampField("ts") + .withLag(new Duration(5, TimeUnit.SECONDS)); + builder.setSpout("integer", new RandomIntegerSpout(), 1); + builder.setBolt("slidingsum", bolt, 1).shuffleGrouping("integer"); + builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("slidingsum"); + Config conf = new Config(); + conf.setDebug(true); + String topoName = "test"; + + if (args != null && args.length > 0) { + topoName = args[0]; + } + + conf.setNumWorkers(1); + StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology()); + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/SlidingWindowTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/SlidingWindowTopology.java new file mode 100644 index 00000000000..fda309df70f --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/SlidingWindowTopology.java @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter; + +import java.util.List; +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.starter.bolt.PrinterBolt; +import org.apache.storm.starter.bolt.SlidingWindowSumBolt; +import org.apache.storm.starter.spout.RandomIntegerSpout; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.topology.base.BaseWindowedBolt; +import org.apache.storm.topology.base.BaseWindowedBolt.Count; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.apache.storm.windowing.TupleWindow; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A sample topology that demonstrates the usage of {@link org.apache.storm.topology.IWindowedBolt} + * to calculate sliding window sum. 
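+ * <p>As a concrete reading of the parameters used in main() below: withWindow(Count.of(30), Count.of(10))
+ * makes each execute() call see up to the last 30 tuples and fire every 10 new tuples, while
+ * withTumblingWindow(Count.of(3)) fires every 3 tuples with no overlap between windows.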
+ */
+public class SlidingWindowTopology {
+
+    private static final Logger LOG = LoggerFactory.getLogger(SlidingWindowTopology.class);
+
+    public static void main(String[] args) throws Exception {
+        TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout("integer", new RandomIntegerSpout(), 1);
+        builder.setBolt("slidingsum", new SlidingWindowSumBolt().withWindow(Count.of(30), Count.of(10)), 1)
+            .shuffleGrouping("integer");
+        builder.setBolt("tumblingavg", new TumblingWindowAvgBolt().withTumblingWindow(Count.of(3)), 1)
+            .shuffleGrouping("slidingsum");
+        builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("tumblingavg");
+        Config conf = new Config();
+        conf.setDebug(true);
+        String topoName = "test";
+        if (args != null && args.length > 0) {
+            topoName = args[0];
+        }
+        conf.setNumWorkers(1);
+        StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
+    }
+
+    /**
+     * Computes tumbling window average.
+     */
+    private static class TumblingWindowAvgBolt extends BaseWindowedBolt {
+        private OutputCollector collector;
+
+        @Override
+        public void prepare(Map topoConf, TopologyContext context, OutputCollector collector) {
+            this.collector = collector;
+        }
+
+        @Override
+        public void execute(TupleWindow inputWindow) {
+            int sum = 0;
+            List<Tuple> tuplesInWindow = inputWindow.get();
+            LOG.debug("Events in current window: " + tuplesInWindow.size());
+            if (tuplesInWindow.size() > 0) {
+                /*
+                 * Since this is a tumbling window calculation,
+                 * we use all the tuples in the window to compute the avg.
+                 */
+                for (Tuple tuple : tuplesInWindow) {
+                    sum += (int) tuple.getValue(0);
+                }
+                collector.emit(new Values(sum / tuplesInWindow.size()));
+            }
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("avg"));
+        }
+    }
+}
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/StatefulTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/StatefulTopology.java
new file mode 100644
index 00000000000..3f30f4866c0
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/StatefulTopology.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.storm.starter; + +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.starter.spout.RandomIntegerSpout; +import org.apache.storm.state.KeyValueState; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.BasicOutputCollector; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.topology.base.BaseBasicBolt; +import org.apache.storm.topology.base.BaseStatefulBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * An example topology that demonstrates the use of {@link org.apache.storm.topology.IStatefulBolt} to manage state. To run the example, + *

+ * $ storm jar examples/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.StatefulTopology statetopology
+ * 
+ *

The default state used is 'InMemoryKeyValueState', which does not persist state across restarts. You could use 'RedisKeyValueState' to
+ * test state persistence by setting the property below in conf/storm.yaml:
+

+ * topology.state.provider: org.apache.storm.redis.state.RedisKeyValueStateProvider
+ * 
+ *

You should also start a local Redis instance before running the 'storm jar' command. The default RedisKeyValueStateProvider parameters
+ * can be overridden in conf/storm.yaml, e.g.:
+

+ *

+ * topology.state.provider.config: '{"keyClass":"...", "valueClass":"...",
+ *                                   "keySerializerClass":"...", "valueSerializerClass":"...",
+ *                                   "jedisPoolConfig":{"host":"localhost", "port":6379,
+ *                                      "timeout":2000, "database":0, "password":"xyz"}}'
+ *
+ * 
+ *

+ */
+public class StatefulTopology {
+    private static final Logger LOG = LoggerFactory.getLogger(StatefulTopology.class);
+
+    public static void main(String[] args) throws Exception {
+        TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout("spout", new RandomIntegerSpout());
+        builder.setBolt("partialsum", new StatefulSumBolt("partial"), 1).shuffleGrouping("spout");
+        builder.setBolt("printer", new PrinterBolt(), 2).shuffleGrouping("partialsum");
+        builder.setBolt("total", new StatefulSumBolt("total"), 1).shuffleGrouping("printer");
+        Config conf = new Config();
+        conf.setDebug(false);
+        String topoName = "test";
+        if (args != null && args.length > 0) {
+            topoName = args[0];
+        }
+        conf.setNumWorkers(1);
+        StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
+    }
+
+    /**
+     * A bolt that uses {@link KeyValueState} to save its state.
+     */
+    private static class StatefulSumBolt extends BaseStatefulBolt<KeyValueState<String, Long>> {
+        String name;
+        KeyValueState<String, Long> kvState;
+        long sum;
+        private OutputCollector collector;
+
+        StatefulSumBolt(String name) {
+            this.name = name;
+        }
+
+        @Override
+        public void prepare(Map topoConf, TopologyContext context, OutputCollector collector) {
+            this.collector = collector;
+        }
+
+        @Override
+        public void execute(Tuple input) {
+            sum += ((Number) input.getValueByField("value")).longValue();
+            LOG.debug("{} sum = {}", name, sum);
+            kvState.put("sum", sum);
+            collector.emit(input, new Values(sum));
+            collector.ack(input);
+        }
+
+        @Override
+        public void initState(KeyValueState<String, Long> state) {
+            kvState = state;
+            sum = kvState.get("sum", 0L);
+            LOG.debug("initState, sum from saved state = {} ", sum);
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("value"));
+        }
+    }
+
+    public static class PrinterBolt extends BaseBasicBolt {
+        @Override
+        public void execute(Tuple tuple, BasicOutputCollector collector) {
+            System.out.println(tuple);
+            LOG.debug("Got tuple {}", tuple);
+            collector.emit(tuple.getValues());
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer ofd) {
+            ofd.declare(new Fields("value"));
+        }
+
+    }
+}
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/StatefulWindowingTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/StatefulWindowingTopology.java
new file mode 100644
index 00000000000..eb0132e4f53
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/StatefulWindowingTopology.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.storm.starter; + +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.starter.bolt.PrinterBolt; +import org.apache.storm.starter.spout.RandomIntegerSpout; +import org.apache.storm.state.KeyValueState; +import org.apache.storm.state.State; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.topology.base.BaseStatefulWindowedBolt; +import org.apache.storm.topology.base.BaseWindowedBolt.Count; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.apache.storm.windowing.TupleWindow; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A simple example that demonstrates the usage of {@link org.apache.storm.topology.IStatefulWindowedBolt} to save the state of the + * windowing operation to avoid re-computation in case of failures. + *

+ * The framework internally manages the window boundaries and does not invoke + * {@link org.apache.storm.topology.IWindowedBolt#execute(TupleWindow)} + * for the already evaluated windows in case of restarts during failures. The + * {@link org.apache.storm.topology.IStatefulBolt#initState(State)} + * is invoked with the previously saved state of the bolt after prepare, before the execute() method is invoked. + *
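+ * <p>As a concrete illustration of the guarantee above (in terms of the bolt below): if the worker dies after
+ * window n has been evaluated and its sum saved, then on restart initState() restores that sum and processing
+ * resumes with window n + 1, so tuples from already-committed windows are not summed twice.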

+ */
+public class StatefulWindowingTopology {
+    private static final Logger LOG = LoggerFactory.getLogger(StatefulWindowingTopology.class);
+
+    public static void main(String[] args) throws Exception {
+        TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout("spout", new RandomIntegerSpout());
+        builder.setBolt("sumbolt", new WindowSumBolt().withWindow(new Count(5), new Count(3))
+            .withMessageIdField("msgid"), 1).shuffleGrouping("spout");
+        builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("sumbolt");
+        Config conf = new Config();
+        conf.setDebug(false);
+        //conf.put(Config.TOPOLOGY_STATE_PROVIDER, "org.apache.storm.redis.state.RedisKeyValueStateProvider");
+
+        String topoName = "test";
+        if (args != null && args.length > 0) {
+            topoName = args[0];
+        }
+        conf.setNumWorkers(1);
+        StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
+    }
+
+    private static class WindowSumBolt extends BaseStatefulWindowedBolt<KeyValueState<String, Long>> {
+        private KeyValueState<String, Long> state;
+        private long sum;
+
+        private OutputCollector collector;
+
+        @Override
+        public void prepare(Map topoConf, TopologyContext context, OutputCollector collector) {
+            this.collector = collector;
+        }
+
+        @Override
+        public void initState(KeyValueState<String, Long> state) {
+            this.state = state;
+            sum = state.get("sum", 0L);
+            LOG.debug("initState with state [" + state + "] current sum [" + sum + "]");
+        }
+
+        @Override
+        public void execute(TupleWindow inputWindow) {
+            for (Tuple tuple : inputWindow.get()) {
+                sum += tuple.getIntegerByField("value");
+            }
+            state.put("sum", sum);
+            collector.emit(new Values(sum));
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("sum"));
+        }
+    }
+
+}
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/WordCountTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/WordCountTopology.java
new file mode 100644
index 00000000000..71a3b42ed28
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/WordCountTopology.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */ + +package org.apache.storm.starter; + +import java.util.HashMap; +import java.util.Map; +import org.apache.storm.starter.bolt.WordCountBolt; +import org.apache.storm.starter.spout.RandomSentenceSpout; +import org.apache.storm.task.ShellBolt; +import org.apache.storm.topology.BasicOutputCollector; +import org.apache.storm.topology.ConfigurableTopology; +import org.apache.storm.topology.IRichBolt; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.topology.base.BaseBasicBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; + +/** + * This topology demonstrates Storm's stream groupings and multilang + * capabilities. + */ +public class WordCountTopology extends ConfigurableTopology { + public static void main(String[] args) throws Exception { + ConfigurableTopology.start(new WordCountTopology(), args); + } + + @Override + protected int run(String[] args) throws Exception { + + TopologyBuilder builder = new TopologyBuilder(); + + builder.setSpout("spout", new RandomSentenceSpout(), 5); + + builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout"); + builder.setBolt("count", new WordCountBolt(), 12).fieldsGrouping("split", new Fields("word")); + + conf.setDebug(true); + + String topologyName = "word-count"; + + conf.setNumWorkers(3); + + if (args != null && args.length > 0) { + topologyName = args[0]; + } + return submit(topologyName, conf, builder); + } + + public static class SplitSentence extends ShellBolt implements IRichBolt { + + public SplitSentence() { + super("python3", "splitsentence.py"); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("word")); + } + + @Override + public Map getComponentConfiguration() { + return null; + } + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/WordCountTopologyNode.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/WordCountTopologyNode.java new file mode 100644 index 00000000000..6df76789311 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/WordCountTopologyNode.java @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */
+
+package org.apache.storm.starter;
+
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.spout.ShellSpout;
+import org.apache.storm.task.ShellBolt;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.IRichBolt;
+import org.apache.storm.topology.IRichSpout;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+
+/**
+ * This topology demonstrates Storm's stream groupings and multilang capabilities.
+ */
+public class WordCountTopologyNode {
+    public static void main(String[] args) throws Exception {
+
+        TopologyBuilder builder = new TopologyBuilder();
+
+        builder.setSpout("spout", new RandomSentence(), 5);
+
+        builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
+        builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));
+
+        Config conf = new Config();
+        conf.setDebug(true);
+        String topoName = "word-count";
+        if (args != null && args.length > 0) {
+            topoName = args[0];
+        }
+        conf.setNumWorkers(3);
+        StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
+    }
+
+    public static class SplitSentence extends ShellBolt implements IRichBolt {
+
+        public SplitSentence() {
+            super("node", "splitsentence.js");
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("word"));
+        }
+
+        @Override
+        public Map getComponentConfiguration() {
+            return null;
+        }
+    }
+
+    public static class RandomSentence extends ShellSpout implements IRichSpout {
+
+        public RandomSentence() {
+            super("node", "randomsentence.js");
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("word"));
+        }
+
+        @Override
+        public Map getComponentConfiguration() {
+            return null;
+        }
+    }
+
+    public static class WordCount extends BaseBasicBolt {
+        Map<String, Integer> counts = new HashMap<String, Integer>();
+
+        @Override
+        public void execute(Tuple tuple, BasicOutputCollector collector) {
+            String word = tuple.getString(0);
+            Integer count = counts.get(word);
+            if (count == null) {
+                count = 0;
+            }
+            count++;
+            counts.put(word, count);
+            collector.emit(new Values(word, count));
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("word", "count"));
+        }
+    }
+}
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/AbstractRankerBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/AbstractRankerBolt.java
new file mode 100644
index 00000000000..b04196ca335
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/AbstractRankerBolt.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.bolt; + +import java.util.HashMap; +import java.util.Map; +import org.apache.log4j.Logger; +import org.apache.storm.Config; +import org.apache.storm.starter.tools.Rankings; +import org.apache.storm.topology.BasicOutputCollector; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseBasicBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.TupleUtils; + +/** + * This abstract bolt provides the basic behavior of bolts that rank objects according to their count. + *

+ * It uses a template method design pattern for {@link AbstractRankerBolt#execute(Tuple, BasicOutputCollector)} to allow + * actual bolt implementations to specify how incoming tuples are processed, i.e. how the objects embedded within those + * tuples are retrieved and counted. + */ +public abstract class AbstractRankerBolt extends BaseBasicBolt { + + private static final long serialVersionUID = 4931640198501530202L; + private static final int DEFAULT_EMIT_FREQUENCY_IN_SECONDS = 2; + private static final int DEFAULT_COUNT = 10; + + private final int emitFrequencyInSeconds; + private final int count; + private final Rankings rankings; + + public AbstractRankerBolt() { + this(DEFAULT_COUNT, DEFAULT_EMIT_FREQUENCY_IN_SECONDS); + } + + public AbstractRankerBolt(int topN) { + this(topN, DEFAULT_EMIT_FREQUENCY_IN_SECONDS); + } + + public AbstractRankerBolt(int topN, int emitFrequencyInSeconds) { + if (topN < 1) { + throw new IllegalArgumentException("topN must be >= 1 (you requested " + topN + ")"); + } + if (emitFrequencyInSeconds < 1) { + throw new IllegalArgumentException( + "The emit frequency must be >= 1 seconds (you requested " + emitFrequencyInSeconds + " seconds)"); + } + count = topN; + this.emitFrequencyInSeconds = emitFrequencyInSeconds; + rankings = new Rankings(count); + } + + protected Rankings getRankings() { + return rankings; + } + + /** + * This method functions as a template method (design pattern). + */ + @Override + public final void execute(Tuple tuple, BasicOutputCollector collector) { + if (TupleUtils.isTick(tuple)) { + getLogger().debug("Received tick tuple, triggering emit of current rankings"); + emitRankings(collector); + } else { + updateRankingsWithTuple(tuple); + } + } + + abstract void updateRankingsWithTuple(Tuple tuple); + + private void emitRankings(BasicOutputCollector collector) { + collector.emit(new Values(rankings.copy())); + getLogger().debug("Rankings: " + rankings); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("rankings")); + } + + @Override + public Map getComponentConfiguration() { + Map conf = new HashMap(); + conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, emitFrequencyInSeconds); + return conf; + } + + abstract Logger getLogger(); +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/IntermediateRankingsBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/IntermediateRankingsBolt.java new file mode 100644 index 00000000000..a6a8b49f9b0 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/IntermediateRankingsBolt.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +package org.apache.storm.starter.bolt; + +import org.apache.log4j.Logger; +import org.apache.storm.starter.tools.Rankable; +import org.apache.storm.starter.tools.RankableObjectWithFields; +import org.apache.storm.tuple.Tuple; + +/** + * This bolt ranks incoming objects by their count. + *

+ * It assumes the input tuples to adhere to the following format: (object, object_count, additionalField1, + * additionalField2, ..., additionalFieldN). + */ +public final class IntermediateRankingsBolt extends AbstractRankerBolt { + + private static final long serialVersionUID = -1369800530256637409L; + private static final Logger LOG = Logger.getLogger(IntermediateRankingsBolt.class); + + public IntermediateRankingsBolt() { + super(); + } + + public IntermediateRankingsBolt(int topN) { + super(topN); + } + + public IntermediateRankingsBolt(int topN, int emitFrequencyInSeconds) { + super(topN, emitFrequencyInSeconds); + } + + @Override + void updateRankingsWithTuple(Tuple tuple) { + Rankable rankable = RankableObjectWithFields.from(tuple); + super.getRankings().updateWith(rankable); + } + + @Override + Logger getLogger() { + return LOG; + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/PrinterBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/PrinterBolt.java new file mode 100644 index 00000000000..8364222a8b7 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/PrinterBolt.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.bolt; + +import org.apache.storm.topology.BasicOutputCollector; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseBasicBolt; +import org.apache.storm.tuple.Tuple; + + +public class PrinterBolt extends BaseBasicBolt { + + @Override + public void execute(Tuple tuple, BasicOutputCollector collector) { + System.out.println(tuple); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer ofd) { + } + +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/RollingCountAggBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/RollingCountAggBolt.java new file mode 100644 index 00000000000..9db1dd62963 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/RollingCountAggBolt.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.starter.bolt;
+
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.log4j.Logger;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+
+/**
+ * This bolt aggregates counts from multiple upstream bolts.
+ */
+public class RollingCountAggBolt extends BaseRichBolt {
+    private static final long serialVersionUID = 5537727428628598519L;
+    private static final Logger LOG = Logger.getLogger(RollingCountAggBolt.class);
+    //Mapping of key->upstreamBolt->count
+    private Map<Object, Map<Integer, Long>> counts = new HashMap<Object, Map<Integer, Long>>();
+    private OutputCollector collector;
+
+
+    @Override
+    public void prepare(Map topoConf, TopologyContext context, OutputCollector collector) {
+        this.collector = collector;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+        Object obj = tuple.getValue(0);
+        long count = tuple.getLong(1);
+        int source = tuple.getSourceTask();
+        Map<Integer, Long> subCounts = counts.get(obj);
+        if (subCounts == null) {
+            subCounts = new HashMap<Integer, Long>();
+            counts.put(obj, subCounts);
+        }
+        //Update the current count for this object
+        subCounts.put(source, count);
+        //Output the sum of all the known counts so far for this key
+        long sum = 0;
+        for (Long val : subCounts.values()) {
+            sum += val;
+        }
+        collector.emit(new Values(obj, sum));
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("obj", "count"));
+    }
+}
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/RollingCountBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/RollingCountBolt.java
new file mode 100644
index 00000000000..0e5cef57d9f
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/RollingCountBolt.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */ + +package org.apache.storm.starter.bolt; + +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import org.apache.log4j.Logger; +import org.apache.storm.Config; +import org.apache.storm.starter.tools.NthLastModifiedTimeTracker; +import org.apache.storm.starter.tools.SlidingWindowCounter; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.TupleUtils; + +/** + * This bolt performs rolling counts of incoming objects, i.e. sliding window based counting. + *

+ * The bolt is configured by two parameters, the length of the sliding window in seconds (which influences the output + * data of the bolt, i.e. how it will count objects) and the emit frequency in seconds (which influences how often the + * bolt will output the latest window counts). For instance, if the window length is set to an equivalent of five + * minutes and the emit frequency to one minute, then the bolt will output the latest five-minute sliding window every + * minute. + *
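+ * <p>In terms of the implementation below, the window is maintained as windowLengthInSeconds / emitFrequencyInSeconds
+ * chunks (e.g. 300 / 60 = 5 chunks for the defaults), and each emit advances the window by one chunk.
+ *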

+ * The bolt emits a rolling count tuple per object, consisting of the object itself, its latest rolling count, and the + * actual duration of the sliding window. The latter is included in case the expected sliding window length (as + * configured by the user) is different from the actual length, e.g. due to high system load. Note that the actual + * window length is tracked and calculated for the window, and not individually for each object within a window. + *
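+ * <p>For example (illustrative values only), an emitted tuple on the ("obj", "count", "actualWindowLengthInSeconds")
+ * output fields might look like ("storm", 42, 300).
+ *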

Note: During the startup phase you will usually observe that the bolt warns you about the actual sliding window
+ * length being smaller than the expected length. This behavior is expected and is caused by the way the sliding window
+ * counts are initially "loaded up". You can safely ignore this warning during startup (e.g. you will see this warning
+ * during the first ~ five minutes of startup time if the window length is set to five minutes).
+ */
+public class RollingCountBolt extends BaseRichBolt {
+
+    private static final long serialVersionUID = 5537727428628598519L;
+    private static final Logger LOG = Logger.getLogger(RollingCountBolt.class);
+    private static final int NUM_WINDOW_CHUNKS = 5;
+    private static final int DEFAULT_SLIDING_WINDOW_IN_SECONDS = NUM_WINDOW_CHUNKS * 60;
+    private static final int DEFAULT_EMIT_FREQUENCY_IN_SECONDS = DEFAULT_SLIDING_WINDOW_IN_SECONDS / NUM_WINDOW_CHUNKS;
+    private static final String WINDOW_LENGTH_WARNING_TEMPLATE =
+        "Actual window length is %d seconds when it should be %d seconds"
+            + " (you can safely ignore this warning during the startup phase)";
+
+    private final SlidingWindowCounter counter;
+    private final int windowLengthInSeconds;
+    private final int emitFrequencyInSeconds;
+    private OutputCollector collector;
+    private NthLastModifiedTimeTracker lastModifiedTracker;
+
+    public RollingCountBolt() {
+        this(DEFAULT_SLIDING_WINDOW_IN_SECONDS, DEFAULT_EMIT_FREQUENCY_IN_SECONDS);
+    }
+
+    public RollingCountBolt(int windowLengthInSeconds, int emitFrequencyInSeconds) {
+        this.windowLengthInSeconds = windowLengthInSeconds;
+        this.emitFrequencyInSeconds = emitFrequencyInSeconds;
+        counter = new SlidingWindowCounter(deriveNumWindowChunksFrom(this.windowLengthInSeconds,
+            this.emitFrequencyInSeconds));
+    }
+
+    private int deriveNumWindowChunksFrom(int windowLengthInSeconds, int windowUpdateFrequencyInSeconds) {
+        return windowLengthInSeconds / windowUpdateFrequencyInSeconds;
+    }
+
+    @Override
+    public void prepare(Map topoConf, TopologyContext context, OutputCollector collector) {
+        this.collector = collector;
+        lastModifiedTracker = new NthLastModifiedTimeTracker(deriveNumWindowChunksFrom(this.windowLengthInSeconds,
+            this.emitFrequencyInSeconds));
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+        if (TupleUtils.isTick(tuple)) {
+            LOG.debug("Received tick tuple, triggering emit of current window counts");
+            emitCurrentWindowCounts();
+        } else {
+            countObjAndAck(tuple);
+        }
+    }
+
+    private void emitCurrentWindowCounts() {
+        Map<Object, Long> counts = counter.getCountsThenAdvanceWindow();
+        int actualWindowLengthInSeconds = lastModifiedTracker.secondsSinceOldestModification();
+        lastModifiedTracker.markAsModified();
+        if (actualWindowLengthInSeconds != windowLengthInSeconds) {
+            LOG.warn(String.format(WINDOW_LENGTH_WARNING_TEMPLATE, actualWindowLengthInSeconds, windowLengthInSeconds));
+        }
+        emit(counts, actualWindowLengthInSeconds);
+    }
+
+    private void emit(Map<Object, Long> counts, int actualWindowLengthInSeconds) {
+        for (Entry<Object, Long> entry : counts.entrySet()) {
+            Object obj = entry.getKey();
+            Long count = entry.getValue();
+            collector.emit(new Values(obj, count, actualWindowLengthInSeconds));
+        }
+    }
+
+    private void countObjAndAck(Tuple tuple) {
+        Object obj = tuple.getValue(0);
+        counter.incrementCount(obj);
+        collector.ack(tuple);
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("obj", "count", "actualWindowLengthInSeconds"));
+    }
+
+    @Override
+    public Map getComponentConfiguration() {
+        Map
conf = new HashMap(); + conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, emitFrequencyInSeconds); + return conf; + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/SingleJoinBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/SingleJoinBolt.java new file mode 100644 index 00000000000..5f9b22525ad --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/SingleJoinBolt.java @@ -0,0 +1,119 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.bolt; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import org.apache.storm.Config; +import org.apache.storm.generated.GlobalStreamId; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.utils.TimeCacheMap; + +/** + * Example of a simple custom bolt for joining two streams. 
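The emit cadence of RollingCountBolt above is driven entirely by tick tuples: getComponentConfiguration() asks Storm for one tick every emitFrequencyInSeconds, and execute() routes those ticks to emitCurrentWindowCounts(). TupleUtils.isTick(tuple) is the supported check; as a rough sketch, it boils down to comparing the tuple's source against the system component and tick stream constants that org.apache.storm.Constants provides:

    // Illustrative only -- prefer TupleUtils.isTick(tuple) in real code.
    static boolean looksLikeTickTuple(Tuple tuple) {
        return Constants.SYSTEM_COMPONENT_ID.equals(tuple.getSourceComponent())
            && Constants.SYSTEM_TICK_STREAM_ID.equals(tuple.getSourceStreamId());
    }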
+ * NOTE: Prefer to use the built-in JoinBolt wherever applicable. + */ +public class SingleJoinBolt extends BaseRichBolt { + OutputCollector collector; + Fields idFields; + Fields outFields; + int numSources; + TimeCacheMap<List<Object>, Map<GlobalStreamId, Tuple>> pending; + Map<String, GlobalStreamId> fieldLocations; + + public SingleJoinBolt(Fields outFields) { + this.outFields = outFields; + } + + @Override + public void prepare(Map<String, Object> conf, TopologyContext context, OutputCollector collector) { + fieldLocations = new HashMap<>(); + this.collector = collector; + int timeout = ((Number) conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS)).intValue(); + pending = new TimeCacheMap<List<Object>, Map<GlobalStreamId, Tuple>>(timeout, new ExpireCallback()); + numSources = context.getThisSources().size(); + Set<String> idFields = null; + for (GlobalStreamId source : context.getThisSources().keySet()) { + Fields fields = context.getComponentOutputFields(source.get_componentId(), source.get_streamId()); + Set<String> setFields = new HashSet<>(fields.toList()); + if (idFields == null) { + idFields = setFields; + } else { + idFields.retainAll(setFields); + } + + for (String outfield : outFields) { + for (String sourcefield : fields) { + if (outfield.equals(sourcefield)) { + fieldLocations.put(outfield, source); + } + } + } + } + this.idFields = new Fields(new ArrayList<String>(idFields)); + + if (fieldLocations.size() != outFields.size()) { + throw new RuntimeException("Cannot find all outfields among sources"); + } + } + + @Override + public void execute(Tuple tuple) { + List<Object> id = tuple.select(idFields); + GlobalStreamId streamId = new GlobalStreamId(tuple.getSourceComponent(), tuple.getSourceStreamId()); + if (!pending.containsKey(id)) { + pending.put(id, new HashMap<GlobalStreamId, Tuple>()); + } + Map<GlobalStreamId, Tuple> parts = pending.get(id); + if (parts.containsKey(streamId)) { + throw new RuntimeException("Received same side of single join twice"); + } + parts.put(streamId, tuple); + if (parts.size() == numSources) { + pending.remove(id); + List<Object> joinResult = new ArrayList<>(); + for (String outField : outFields) { + GlobalStreamId loc = fieldLocations.get(outField); + joinResult.add(parts.get(loc).getValueByField(outField)); + } + collector.emit(new ArrayList<Tuple>(parts.values()), joinResult); + + for (Tuple part : parts.values()) { + collector.ack(part); + } + } + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(outFields); + } + + private class ExpireCallback implements TimeCacheMap.ExpiredCallback<List<Object>, Map<GlobalStreamId, Tuple>> { + @Override + public void expire(List<Object> id, Map<GlobalStreamId, Tuple> tuples) { + for (Tuple tuple : tuples.values()) { + collector.fail(tuple); + } + } + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/SlidingWindowSumBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/SlidingWindowSumBolt.java new file mode 100644 index 00000000000..27021817350 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/SlidingWindowSumBolt.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License.
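A minimal wiring sketch for SingleJoinBolt, assuming two hypothetical spouts genderSpout and ageSpout that emit ("id", "gender") and ("id", "age") respectively (builder is a TopologyBuilder); fields grouping on the shared "id" field routes both sides of a join key to the same bolt task:

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("gender", genderSpout);   // hypothetical spout emitting ("id", "gender")
    builder.setSpout("age", ageSpout);         // hypothetical spout emitting ("id", "age")
    builder.setBolt("join", new SingleJoinBolt(new Fields("gender", "age")))
           .fieldsGrouping("gender", new Fields("id"))
           .fieldsGrouping("age", new Fields("id"));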
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.bolt; + +import java.util.List; +import java.util.Map; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseWindowedBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.apache.storm.windowing.TupleWindow; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Computes sliding window sum. + */ +public class SlidingWindowSumBolt extends BaseWindowedBolt { + private static final Logger LOG = LoggerFactory.getLogger(SlidingWindowSumBolt.class); + + private int sum = 0; + private OutputCollector collector; + + @Override + public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) { + this.collector = collector; + } + + @Override + public void execute(TupleWindow inputWindow) { + /* + * The inputWindow gives a view of + * (a) all the events in the window + * (b) events that expired since last activation of the window + * (c) events that newly arrived since last activation of the window + */ + List<Tuple> tuplesInWindow = inputWindow.get(); + List<Tuple> newTuples = inputWindow.getNew(); + List<Tuple> expiredTuples = inputWindow.getExpired(); + + LOG.debug("Events in current window: " + tuplesInWindow.size()); + /* + * Instead of iterating over all the tuples in the window to compute + * the sum, the values for the new events are added and old events are + * subtracted. Similar optimizations might be possible in other + * windowing computations. + */ + for (Tuple tuple : newTuples) { + sum += (int) tuple.getValue(0); + } + for (Tuple tuple : expiredTuples) { + sum -= (int) tuple.getValue(0); + } + collector.emit(new Values(sum)); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("sum")); + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/TotalRankingsBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/TotalRankingsBolt.java new file mode 100644 index 00000000000..e185a9ec85d --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/TotalRankingsBolt.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License.
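Since SlidingWindowSumBolt extends BaseWindowedBolt, the window length and slide are chosen when the topology is wired up. A sketch, assuming an upstream component named "integer" that emits single-integer tuples (the component name and counts are illustrative):

    builder.setBolt("slidingsum",
            new SlidingWindowSumBolt()
                .withWindow(BaseWindowedBolt.Count.of(30), BaseWindowedBolt.Count.of(10)))
           .shuffleGrouping("integer");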
+ */ + +package org.apache.storm.starter.bolt; + +import org.apache.log4j.Logger; +import org.apache.storm.starter.tools.Rankings; +import org.apache.storm.tuple.Tuple; + +/** + * This bolt merges incoming {@link Rankings}. + *
+ * It can be used to merge intermediate rankings generated by {@link IntermediateRankingsBolt} into a final, + * consolidated ranking. To do so, configure this bolt with a globalGrouping on {@link IntermediateRankingsBolt}. + */ +public final class TotalRankingsBolt extends AbstractRankerBolt { + + private static final long serialVersionUID = -8447525895532302198L; + private static final Logger LOG = Logger.getLogger(TotalRankingsBolt.class); + + public TotalRankingsBolt() { + super(); + } + + public TotalRankingsBolt(int topN) { + super(topN); + } + + public TotalRankingsBolt(int topN, int emitFrequencyInSeconds) { + super(topN, emitFrequencyInSeconds); + } + + @Override + void updateRankingsWithTuple(Tuple tuple) { + Rankings rankingsToBeMerged = (Rankings) tuple.getValue(0); + super.getRankings().updateWith(rankingsToBeMerged); + super.getRankings().pruneZeroCounts(); + } + + @Override + Logger getLogger() { + return LOG; + } + +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/WordCountBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/WordCountBolt.java new file mode 100644 index 00000000000..870d5bd1931 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/WordCountBolt.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.bolt; + +import java.util.HashMap; +import java.util.Map; +import org.apache.storm.topology.BasicOutputCollector; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseBasicBolt; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; + +public class WordCountBolt extends BaseBasicBolt { + Map<String, Integer> counts = new HashMap<>(); + + @Override + public void execute(Tuple tuple, BasicOutputCollector collector) { + String word = tuple.getString(0); + Integer count = counts.get(word); + if (count == null) { + count = 0; + } + count++; + counts.put(word, count); + collector.emit(new Values(word, count)); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("word", "count")); + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/RandomIntegerSpout.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/RandomIntegerSpout.java new file mode 100644 index 00000000000..e1a9ee51404 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/RandomIntegerSpout.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership.
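WordCountBolt keeps its counts in task-local memory, so correct totals depend on routing every occurrence of a word to the same task. A wiring sketch, assuming an upstream sentence-splitting component named "split" that emits a "word" field (names and parallelism are illustrative):

    builder.setBolt("count", new WordCountBolt(), 12)
           .fieldsGrouping("split", new Fields("word"));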
The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.spout; + +import java.util.Map; +import java.util.Random; +import org.apache.storm.spout.SpoutOutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichSpout; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.Utils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Emits a random integer and a timestamp value (offset by one day), + * every 100 ms. The ts field can be used in tuple time based windowing. + */ +public class RandomIntegerSpout extends BaseRichSpout { + private static final Logger LOG = LoggerFactory.getLogger(RandomIntegerSpout.class); + private SpoutOutputCollector collector; + private Random rand; + private long msgId = 0; + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("value", "ts", "msgid")); + } + + @Override + public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) { + this.collector = collector; + this.rand = new Random(); + } + + @Override + public void nextTuple() { + Utils.sleep(100); + collector.emit(new Values(rand.nextInt(1000), System.currentTimeMillis() - (24 * 60 * 60 * 1000), ++msgId), msgId); + } + + @Override + public void ack(Object msgId) { + LOG.debug("Got ACK for msgId : " + msgId); + } + + @Override + public void fail(Object msgId) { + LOG.debug("Got FAIL for msgId : " + msgId); + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/RandomNumberGeneratorSpout.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/RandomNumberGeneratorSpout.java new file mode 100644 index 00000000000..e6c48048c33 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/RandomNumberGeneratorSpout.java @@ -0,0 +1,88 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.spout; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ThreadLocalRandom; +import org.apache.storm.Config; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.trident.operation.TridentCollector; +import org.apache.storm.trident.spout.IBatchSpout; +import org.apache.storm.tuple.Fields; + +/** + * This spout generates batches of random whole numbers between 0 and {@code maxNumber} (inclusive) for the given {@code fields}. + * + */ +public class RandomNumberGeneratorSpout implements IBatchSpout { + private final Fields fields; + private final int batchSize; + private final int maxNumber; + private final Map<Long, List<List<Integer>>> batches = new HashMap<>(); + + public RandomNumberGeneratorSpout(Fields fields, int batchSize, int maxNumber) { + this.fields = fields; + this.batchSize = batchSize; + this.maxNumber = maxNumber; + } + + @Override + public void open(Map<String, Object> conf, TopologyContext context) { + } + + @Override + public void emitBatch(long batchId, TridentCollector collector) { + List<List<Integer>> values = null; + if (batches.containsKey(batchId)) { + values = batches.get(batchId); + } else { + values = new ArrayList<>(); + for (int i = 0; i < batchSize; i++) { + List<Integer> numbers = new ArrayList<>(); + for (int x = 0; x < fields.size(); x++) { + numbers.add(ThreadLocalRandom.current().nextInt(0, maxNumber + 1)); + } + values.add(numbers); + } + batches.put(batchId, values); + } + for (List<Integer> value : values) { + collector.emit(value); + } + } + + @Override + public void ack(long batchId) { + batches.remove(batchId); + } + + @Override + public void close() { + + } + + @Override + public Map<String, Object> getComponentConfiguration() { + Config conf = new Config(); + conf.setMaxTaskParallelism(1); + return conf; + } + + @Override + public Fields getOutputFields() { + return fields; + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/RandomSentenceSpout.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/RandomSentenceSpout.java new file mode 100644 index 00000000000..92af62655e7 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/RandomSentenceSpout.java @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License.
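Because emitBatch caches each batch until ack(batchId) removes it, a failed batch is re-emitted with exactly the same values, which is what gives this IBatchSpout its replay semantics. A sketch of plugging it into a Trident topology (the stream name and field names are illustrative):

    TridentTopology topology = new TridentTopology();
    topology.newStream("randomNumbers",
            new RandomNumberGeneratorSpout(new Fields("a", "b"), 10, 1000))
        .peek(tuple -> System.out.println(tuple));  // print each generated tuple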
+ */ + +package org.apache.storm.starter.spout; + +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.Map; +import java.util.Random; +import org.apache.storm.spout.SpoutOutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichSpout; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.Utils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class RandomSentenceSpout extends BaseRichSpout { + private static final Logger LOG = LoggerFactory.getLogger(RandomSentenceSpout.class); + + SpoutOutputCollector collector; + Random rand; + + + @Override + public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) { + this.collector = collector; + rand = new Random(); + } + + @Override + public void nextTuple() { + Utils.sleep(100); + String[] sentences = new String[]{ + sentence("the cow jumped over the moon"), sentence("an apple a day keeps the doctor away"), + sentence("four score and seven years ago"), sentence("snow white and the seven dwarfs"), sentence("i am at two with nature") + }; + final String sentence = sentences[rand.nextInt(sentences.length)]; + + LOG.debug("Emitting tuple: {}", sentence); + + collector.emit(new Values(sentence)); + } + + protected String sentence(String input) { + return input; + } + + @Override + public void ack(Object id) { + } + + @Override + public void fail(Object id) { + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("word")); + } + + // Add unique identifier to each tuple, which is helpful for debugging + public static class TimeStamped extends RandomSentenceSpout { + private final String prefix; + + public TimeStamped() { + this(""); + } + + public TimeStamped(String prefix) { + this.prefix = prefix; + } + + @Override + protected String sentence(String input) { + return prefix + currentDate() + " " + input; + } + + private String currentDate() { + return new SimpleDateFormat("yyyy.MM.dd_HH:mm:ss.SSSSSSSSS").format(new Date()); + } + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/AggregateExample.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/AggregateExample.java new file mode 100644 index 00000000000..626ac8f7359 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/AggregateExample.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License.
+ */ + +package org.apache.storm.starter.streams; + +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.starter.spout.RandomIntegerSpout; +import org.apache.storm.streams.Pair; +import org.apache.storm.streams.StreamBuilder; +import org.apache.storm.streams.operations.CombinerAggregator; +import org.apache.storm.streams.operations.mappers.ValueMapper; +import org.apache.storm.streams.windowing.TumblingWindows; +import org.apache.storm.topology.base.BaseWindowedBolt; + +/** + * An example that illustrates the global aggregate. + */ +public class AggregateExample { + @SuppressWarnings("unchecked") + public static void main(String[] args) throws Exception { + StreamBuilder builder = new StreamBuilder(); + /* + * Computes average of the stream of numbers emitted by the spout. Internally the per-partition + * sum and counts are accumulated and emitted to a downstream task where the partially accumulated + * results are merged and the final result is emitted. + */ + builder.newStream(new RandomIntegerSpout(), new ValueMapper<Integer>(0), 2) + .window(TumblingWindows.of(BaseWindowedBolt.Duration.seconds(5))) + .filter(x -> x > 0 && x < 500) + .aggregate(new Avg()) + .print(); + + Config config = new Config(); + String topoName = "AGG_EXAMPLE"; + if (args.length > 0) { + topoName = args[0]; + } + config.setNumWorkers(1); + StormSubmitter.submitTopologyWithProgressBar(topoName, config, builder.build()); + } + + private static class Avg implements CombinerAggregator<Integer, Pair<Integer, Integer>, Double> { + @Override + public Pair<Integer, Integer> init() { + return Pair.of(0, 0); + } + + @Override + public Pair<Integer, Integer> apply(Pair<Integer, Integer> sumAndCount, Integer value) { + return Pair.of(sumAndCount.value1 + value, sumAndCount.value2 + 1); + } + + @Override + public Pair<Integer, Integer> merge(Pair<Integer, Integer> sumAndCount1, Pair<Integer, Integer> sumAndCount2) { + System.out.println("Merge " + sumAndCount1 + " and " + sumAndCount2); + return Pair.of( + sumAndCount1.value1 + sumAndCount2.value1, + sumAndCount1.value2 + sumAndCount2.value2 + ); + } + + @Override + public Double result(Pair<Integer, Integer> sumAndCount) { + return (double) sumAndCount.value1 / sumAndCount.value2; + } + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/BranchExample.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/BranchExample.java new file mode 100644 index 00000000000..fc7e74a5a7f --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/BranchExample.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License.
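To make the CombinerAggregator contract concrete, here is a small worked example of how the pieces of Avg compose; in the topology Storm performs the equivalent calls, with apply running per partition and merge combining partial results downstream:

    Avg avg = new Avg();
    Pair<Integer, Integer> partition1 = avg.apply(avg.init(), 100);  // (100, 1)
    Pair<Integer, Integer> partition2 = avg.apply(avg.init(), 200);  // (200, 1)
    Double result = avg.result(avg.merge(partition1, partition2));   // 300 / 2 = 150.0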
+ */ + +package org.apache.storm.starter.streams; + +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.starter.spout.RandomIntegerSpout; +import org.apache.storm.streams.Stream; +import org.apache.storm.streams.StreamBuilder; +import org.apache.storm.streams.operations.Predicate; +import org.apache.storm.streams.operations.mappers.ValueMapper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * An example that demonstrates the usage of {@link Stream#branch(Predicate[])} to split a stream + * into multiple branches based on predicates. + */ +public class BranchExample { + private static final Logger LOG = LoggerFactory.getLogger(BranchExample.class); + + @SuppressWarnings("unchecked") + public static void main(String[] args) throws Exception { + StreamBuilder builder = new StreamBuilder(); + Stream<Integer>[] evenAndOdd = builder + /* + * Create a stream of random numbers from a spout that + * emits random integers by extracting the tuple value at index 0. + */ + .newStream(new RandomIntegerSpout(), new ValueMapper<Integer>(0)) + /* + * Split the stream of numbers into streams of + * even and odd numbers. The first stream contains even + * and the second contains odd numbers. + */ + .branch(x -> (x % 2) == 0, + x -> (x % 2) == 1); + + evenAndOdd[0].forEach(x -> LOG.info("EVEN> " + x)); + evenAndOdd[1].forEach(x -> LOG.info("ODD > " + x)); + + Config config = new Config(); + String topoName = "branchExample"; + if (args.length > 0) { + topoName = args[0]; + } + + config.setNumWorkers(1); + StormSubmitter.submitTopologyWithProgressBar(topoName, config, builder.build()); + } + +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/GroupByKeyAndWindowExample.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/GroupByKeyAndWindowExample.java new file mode 100644 index 00000000000..02617a2cf57 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/GroupByKeyAndWindowExample.java @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License.
+ */ + +package org.apache.storm.starter.streams; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.spout.SpoutOutputCollector; +import org.apache.storm.streams.PairStream; +import org.apache.storm.streams.StreamBuilder; +import org.apache.storm.streams.operations.Reducer; +import org.apache.storm.streams.operations.mappers.PairValueMapper; +import org.apache.storm.streams.windowing.SlidingWindows; +import org.apache.storm.streams.windowing.Window; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichSpout; +import org.apache.storm.topology.base.BaseWindowedBolt.Count; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.Utils; + +/** + * An example that shows the usage of {@link PairStream#groupByKeyAndWindow(Window)} + * and {@link PairStream#reduceByKeyAndWindow(Reducer, Window)}. + */ +public class GroupByKeyAndWindowExample { + public static void main(String[] args) throws Exception { + StreamBuilder builder = new StreamBuilder(); + + // a stream of stock quotes + builder.newStream(new StockQuotes(), new PairValueMapper<String, Double>(0, 1)) + /* + * The elements having the same key within the window will be grouped + * together and the corresponding values will be merged. + * + * The result is a PairStream<String, Iterable<Double>> with + * 'stock symbol' as the key and 'stock prices' for that symbol within the window as the value. + */ + .groupByKeyAndWindow(SlidingWindows.of(Count.of(6), Count.of(3))) + .print(); + + // a stream of stock quotes + builder.newStream(new StockQuotes(), new PairValueMapper<String, Double>(0, 1)) + /* + * The elements having the same key within the window will be grouped + * together and their values will be reduced using the given reduce function. + * + * Here the result is a PairStream<String, Double> with + * 'stock symbol' as the key and the maximum price for that symbol within the window as the value. + */ + .reduceByKeyAndWindow((x, y) -> x > y ? 
x : y, SlidingWindows.of(Count.of(6), Count.of(3))) + .print(); + + Config config = new Config(); + String topoName = GroupByKeyAndWindowExample.class.getName(); + if (args.length > 0) { + topoName = args[0]; + } + config.setNumWorkers(1); + StormSubmitter.submitTopologyWithProgressBar(topoName, config, builder.build()); + } + + private static class StockQuotes extends BaseRichSpout { + private final List<List<Values>> values = Arrays.asList( + Arrays.asList(new Values("AAPL", 100.0), new Values("GOOG", 780.0), new Values("FB", 125.0)), + Arrays.asList(new Values("AAPL", 105.0), new Values("GOOG", 790.0), new Values("FB", 130.0)), + Arrays.asList(new Values("AAPL", 102.0), new Values("GOOG", 788.0), new Values("FB", 128.0)) + ); + private SpoutOutputCollector collector; + private int index = 0; + + @Override + public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) { + this.collector = collector; + } + + @Override + public void nextTuple() { + Utils.sleep(5000); + for (Values v : values.get(index)) { + collector.emit(v); + } + index = (index + 1) % values.size(); + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("symbol", "price")); + } + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/JoinExample.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/JoinExample.java new file mode 100644 index 00000000000..9016c787896 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/JoinExample.java @@ -0,0 +1,97 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.streams; + +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.spout.SpoutOutputCollector; +import org.apache.storm.streams.PairStream; +import org.apache.storm.streams.StreamBuilder; +import org.apache.storm.streams.operations.Function; +import org.apache.storm.streams.operations.mappers.PairValueMapper; +import org.apache.storm.streams.windowing.TumblingWindows; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichSpout; +import org.apache.storm.topology.base.BaseWindowedBolt.Duration; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.Utils; + +/** + * An example that demonstrates the usage of {@link PairStream#join(PairStream)} to join + * multiple streams.
+ */ +public class JoinExample { + public static void main(String[] args) throws Exception { + StreamBuilder builder = new StreamBuilder(); + // a stream of (number, square) pairs + PairStream<Integer, Integer> squares = builder + .newStream(new NumberSpout(x -> x * x), + new PairValueMapper<>(0, 1)); + // a stream of (number, cube) pairs + PairStream<Integer, Integer> cubes = builder + .newStream(new NumberSpout(x -> x * x * x), + new PairValueMapper<>(0, 1)); + + // create a windowed stream of five seconds duration + squares.window(TumblingWindows.of(Duration.seconds(5))) + /* + * Join the squares and the cubes stream within the window. + * The values in the squares stream having the same key as that + * of the cubes stream within the window will be joined together. + */ + .join(cubes) + /* + * The results should be of the form (number, (square, cube)) + */ + .print(); + + Config config = new Config(); + String topoName = JoinExample.class.getName(); + if (args.length > 0) { + topoName = args[0]; + } + config.setNumWorkers(1); + StormSubmitter.submitTopologyWithProgressBar(topoName, config, builder.build()); + } + + private static class NumberSpout extends BaseRichSpout { + private final Function<Integer, Integer> function; + private SpoutOutputCollector collector; + private int count = 1; + + NumberSpout(Function<Integer, Integer> function) { + this.function = function; + } + + @Override + public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) { + this.collector = collector; + } + + @Override + public void nextTuple() { + Utils.sleep(990); + collector.emit(new Values(count, function.apply(count))); + count++; + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("key", "val")); + } + } + +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/StateQueryExample.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/StateQueryExample.java new file mode 100644 index 00000000000..b72b67614e0 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/StateQueryExample.java @@ -0,0 +1,106 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License.
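The join above is an inner join scoped to each window: a key is emitted only if both streams produced it within the same five-second window, e.g. (2, (4, 8)) once both spouts have reached count 2. If unmatched keys should also appear, PairStream offers outer-join variants in recent Storm versions (a sketch; check the PairStream javadoc of your version for the exact signatures):

    squares.window(TumblingWindows.of(Duration.seconds(5)))
           .leftOuterJoin(cubes)  // keeps squares entries with no matching cube in the window
           .print();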
+ */ + +package org.apache.storm.starter.streams; + +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.spout.SpoutOutputCollector; +import org.apache.storm.streams.Pair; +import org.apache.storm.streams.Stream; +import org.apache.storm.streams.StreamBuilder; +import org.apache.storm.streams.StreamState; +import org.apache.storm.streams.operations.mappers.ValueMapper; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.testing.TestWordSpout; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichSpout; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.Utils; + +/** + * An example that uses {@link Stream#stateQuery(StreamState)} to query the state. + * + * <p>You should start a local redis instance before running the 'storm jar' command. By default + * the connection will be attempted at localhost:6379. The default + * RedisKeyValueStateProvider parameters can be overridden in conf/storm.yaml, e.g.: + * + * <pre>
+ * topology.state.provider.config: '{"keyClass":"...", "valueClass":"...",
+ *  "keySerializerClass":"...", "valueSerializerClass":"...",
+ *  "jedisPoolConfig":{"host":"localhost", "port":6379,
+ *  "timeout":2000, "database":0, "password":"xyz"}}'
+ * </pre>
+ */ +public class StateQueryExample { + public static void main(String[] args) throws Exception { + StreamBuilder builder = new StreamBuilder(); + StreamState<String, Long> ss = builder.newStream(new TestWordSpout(), new ValueMapper<String>(0), 2) + /* + * Transform the stream of words to a stream of (word, 1) pairs + */ + .mapToPair(w -> Pair.of(w, 1)) + /* + * Update the count in the state. Here the first argument 0L is the initial value for the + * count and + * the second argument is a function that increments the count for each value received. + */ + .updateStateByKey(0L, (count, val) -> count + 1); + + /* + * A stream of words emitted by the QuerySpout is used as + * the keys to query the state. + */ + builder.newStream(new QuerySpout(), new ValueMapper<String>(0)) + /* + * Queries the state and emits the + * matching (key, value) as results. The stream state returned + * by the updateStateByKey is passed as the argument to stateQuery. + */ + .stateQuery(ss).print(); + + Config config = new Config(); + // use redis based state store for persistence + config.put(Config.TOPOLOGY_STATE_PROVIDER, "org.apache.storm.redis.state.RedisKeyValueStateProvider"); + String topoName = "test"; + if (args.length > 0) { + topoName = args[0]; + } + config.setNumWorkers(1); + StormSubmitter.submitTopologyWithProgressBar(topoName, config, builder.build()); + } + + private static class QuerySpout extends BaseRichSpout { + private final String[] words = { "nathan", "mike" }; + private SpoutOutputCollector collector; + + @Override + public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) { + this.collector = collector; + } + + @Override + public void nextTuple() { + Utils.sleep(2000); + for (String word : words) { + collector.emit(new Values(word)); + } + } + + @Override + public void declareOutputFields(OutputFieldsDeclarer declarer) { + declarer.declare(new Fields("word")); + } + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/StatefulWordCount.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/StatefulWordCount.java new file mode 100644 index 00000000000..39534b38066 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/StatefulWordCount.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License.
+ */ + +package org.apache.storm.starter.streams; + +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.streams.Pair; +import org.apache.storm.streams.PairStream; +import org.apache.storm.streams.StreamBuilder; +import org.apache.storm.streams.operations.StateUpdater; +import org.apache.storm.streams.operations.mappers.ValueMapper; +import org.apache.storm.streams.windowing.TumblingWindows; +import org.apache.storm.testing.TestWordSpout; +import org.apache.storm.topology.base.BaseWindowedBolt; + +/** + * A stateful word count that uses {@link PairStream#updateStateByKey(StateUpdater)} to + * save the counts in a key-value state. This example uses a Redis state store. + * + * <p>You should start a local redis instance before running the 'storm jar' command. By default + * the connection will be attempted at localhost:6379. The default + * RedisKeyValueStateProvider parameters can be overridden in conf/storm.yaml, e.g.: + * + * <pre>
+ * topology.state.provider.config: '{"keyClass":"...", "valueClass":"...",
+ *  "keySerializerClass":"...", "valueSerializerClass":"...",
+ *  "jedisPoolConfig":{"host":"localhost", "port":6379,
+ *  "timeout":2000, "database":0, "password":"xyz"}}'
+ * </pre>
+ */ +public class StatefulWordCount { + public static void main(String[] args) throws Exception { + StreamBuilder builder = new StreamBuilder(); + // a stream of words + builder.newStream(new TestWordSpout(), new ValueMapper<String>(0), 2) + .window(TumblingWindows.of(BaseWindowedBolt.Duration.seconds(2))) + /* + * create a stream of (word, 1) pairs + */ + .mapToPair(w -> Pair.of(w, 1)) + /* + * compute the word counts in the last two second window + */ + .countByKey() + /* + * update the word counts in the state. + * Here the first argument 0L is the initial value for the state + * and the second argument is a function that adds the count to the current value in the state. + */ + .updateStateByKey(0L, (state, count) -> state + count) + /* + * convert the state back to a stream and print the results + */ + .toPairStream() + .print(); + + Config config = new Config(); + // use redis based state store for persistence + config.put(Config.TOPOLOGY_STATE_PROVIDER, "org.apache.storm.redis.state.RedisKeyValueStateProvider"); + String topoName = "test"; + if (args.length > 0) { + topoName = args[0]; + } + config.setNumWorkers(1); + StormSubmitter.submitTopologyWithProgressBar(topoName, config, builder.build()); + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/TypedTupleExample.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/TypedTupleExample.java new file mode 100644 index 00000000000..f3e8a5ac5fe --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/TypedTupleExample.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.streams; + +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.starter.spout.RandomIntegerSpout; +import org.apache.storm.streams.Pair; +import org.apache.storm.streams.PairStream; +import org.apache.storm.streams.Stream; +import org.apache.storm.streams.StreamBuilder; +import org.apache.storm.streams.operations.mappers.TupleValueMappers; +import org.apache.storm.streams.tuple.Tuple3; +import org.apache.storm.streams.windowing.TumblingWindows; +import org.apache.storm.topology.base.BaseWindowedBolt.Count; + +/** + * An example that illustrates the usage of typed tuples (TupleN<..>) and {@link TupleValueMappers}. + */ +public class TypedTupleExample { + + /** + * The spout emits sequences of (Integer, Long, Long). TupleValueMapper can be used to extract fields + * from the values and produce a stream of typed tuples (Tuple3<Integer, Long, Long> in this case).
+ */ + public static void main(String[] args) throws Exception { + StreamBuilder builder = new StreamBuilder(); + Stream<Tuple3<Integer, Long, Long>> stream = builder.newStream(new RandomIntegerSpout(), TupleValueMappers.of(0, 1, 2)); + + PairStream<Long, Integer> pairs = stream.mapToPair(t -> Pair.of(t.value2 / 10000, t.value1)); + + pairs.window(TumblingWindows.of(Count.of(10))).groupByKey().print(); + + String topoName = "test"; + if (args.length > 0) { + topoName = args[0]; + } + Config config = new Config(); + config.setNumWorkers(1); + StormSubmitter.submitTopologyWithProgressBar(topoName, config, builder.build()); + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/WindowedWordCount.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/WindowedWordCount.java new file mode 100644 index 00000000000..ef8edd4a6fb --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/WindowedWordCount.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.streams; + +import java.util.Arrays; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.starter.spout.RandomSentenceSpout; +import org.apache.storm.streams.Pair; +import org.apache.storm.streams.StreamBuilder; +import org.apache.storm.streams.operations.mappers.ValueMapper; +import org.apache.storm.streams.windowing.TumblingWindows; +import org.apache.storm.topology.base.BaseWindowedBolt.Duration; + +/** + * A windowed word count example.
+ */ +public class WindowedWordCount { + public static void main(String[] args) throws Exception { + StreamBuilder builder = new StreamBuilder(); + // A stream of random sentences + builder.newStream(new RandomSentenceSpout(), new ValueMapper<String>(0), 2) + /* + * a two seconds tumbling window + */ + .window(TumblingWindows.of(Duration.seconds(2))) + /* + * split the sentences to words + */ + .flatMap(s -> Arrays.asList(s.split(" "))) + /* + * create a stream of (word, 1) pairs + */ + .mapToPair(w -> Pair.of(w, 1)) + /* + * compute the word counts in the last two second window + */ + .countByKey() + /* + * emit the count for the words that occurred + * at least five times in the last two seconds + */ + .filter(x -> x.getSecond() >= 5) + /* + * print the results to stdout + */ + .print(); + + Config config = new Config(); + String topoName = "test"; + if (args.length > 0) { + topoName = args[0]; + } + config.setNumWorkers(1); + StormSubmitter.submitTopologyWithProgressBar(topoName, config, builder.build()); + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/WordCountToBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/WordCountToBolt.java new file mode 100644 index 00000000000..997e642d4f8 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/WordCountToBolt.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.streams; + +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.redis.bolt.RedisStoreBolt; +import org.apache.storm.redis.common.config.JedisPoolConfig; +import org.apache.storm.redis.common.mapper.RedisDataTypeDescription; +import org.apache.storm.redis.common.mapper.RedisStoreMapper; +import org.apache.storm.streams.Pair; +import org.apache.storm.streams.StreamBuilder; +import org.apache.storm.streams.operations.mappers.ValueMapper; +import org.apache.storm.testing.TestWordSpout; +import org.apache.storm.topology.IRichBolt; +import org.apache.storm.tuple.ITuple; + +/** + * An example that computes word counts and finally emits the results to an + * external bolt (sink).
+ */ +public class WordCountToBolt { + public static void main(String[] args) throws Exception { + StreamBuilder builder = new StreamBuilder(); + + // Redis config parameters for the RedisStoreBolt + JedisPoolConfig poolConfig = new JedisPoolConfig.Builder() + .setHost("127.0.0.1").setPort(6379).build(); + // Storm tuple to redis key-value mapper + RedisStoreMapper storeMapper = new WordCountStoreMapper(); + // The redis bolt (sink) + IRichBolt redisStoreBolt = new RedisStoreBolt(poolConfig, storeMapper); + + // A stream of words + builder.newStream(new TestWordSpout(), new ValueMapper<String>(0)) + /* + * create a stream of (word, 1) pairs + */ + .mapToPair(w -> Pair.of(w, 1)) + /* + * aggregate the count + */ + .countByKey() + /* + * The result of aggregation is forwarded to + * the RedisStoreBolt. The forwarded tuple is a + * key-value pair of (word, count) with ("key", "value") + * being the field names. + */ + .to(redisStoreBolt); + + Config config = new Config(); + String topoName = "test"; + if (args.length > 0) { + topoName = args[0]; + } + config.setNumWorkers(1); + StormSubmitter.submitTopologyWithProgressBar(topoName, config, builder.build()); + } + + // Maps a storm tuple to redis key and value + private static class WordCountStoreMapper implements RedisStoreMapper { + private final RedisDataTypeDescription description; + private final String hashKey = "wordCount"; + + WordCountStoreMapper() { + description = new RedisDataTypeDescription(RedisDataTypeDescription.RedisDataType.HASH, hashKey); + } + + @Override + public RedisDataTypeDescription getDataTypeDescription() { + return description; + } + + @Override + public String getKeyFromTuple(ITuple tuple) { + return tuple.getStringByField("key"); + } + + @Override + public String getValueFromTuple(ITuple tuple) { + return String.valueOf(tuple.getLongByField("value")); + } + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/NthLastModifiedTimeTracker.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/NthLastModifiedTimeTracker.java new file mode 100644 index 00000000000..67260a3b580 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/NthLastModifiedTimeTracker.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.tools; + +import java.util.concurrent.ArrayBlockingQueue; + +import org.apache.storm.utils.Time; + +/** + * This class tracks the time-since-last-modify of a "thing" in a rolling fashion. + *
+ * For example, create a 5-slot tracker to track the five most recent time-since-last-modify. + *
+ * You must manually "mark" the "thing" you are tracking -- in terms of modification times -- as having just + * been modified. + */ +public class NthLastModifiedTimeTracker { + + private static final int MILLIS_IN_SEC = 1000; + + private final ArrayBlockingQueue<Long> lastModifiedTimesMillis; + + public NthLastModifiedTimeTracker(int numTimesToTrack) { + if (numTimesToTrack < 1) { + throw new IllegalArgumentException( + "numTimesToTrack must be greater than zero (you requested " + numTimesToTrack + ")"); + } + lastModifiedTimesMillis = new ArrayBlockingQueue<>(numTimesToTrack); + initLastModifiedTimesMillis(numTimesToTrack); + } + + private void initLastModifiedTimesMillis(int numTimesToTrack) { + long nowCached = now(); + for (int i = 0; i < numTimesToTrack; i++) { + lastModifiedTimesMillis.add(nowCached); + } + } + + private long now() { + return Time.currentTimeMillis(); + } + + public int secondsSinceOldestModification() { + try { + long modifiedTimeMillis = lastModifiedTimesMillis.take(); + return (int) ((now() - modifiedTimeMillis) / MILLIS_IN_SEC); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + public void markAsModified() { + updateLastModifiedTime(); + } + + private void updateLastModifiedTime() { + if (!lastModifiedTimesMillis.offer(now())) { + lastModifiedTimesMillis.poll(); + try { + lastModifiedTimesMillis.put(now()); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/Rankable.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/Rankable.java new file mode 100644 index 00000000000..ea9e6d66d1f --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/Rankable.java @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.tools; + +public interface Rankable extends Comparable<Rankable> { + + Object getObject(); + + long getCount(); + + /** + * Note: We do not defensively copy the object wrapped by the Rankable. It is passed as is. + * + * @return a defensive copy of this Rankable (the wrapped object itself is not copied) + */ + Rankable copy(); +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/RankableObjectWithFields.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/RankableObjectWithFields.java new file mode 100644 index 00000000000..fea589691c9 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/RankableObjectWithFields.java @@ -0,0 +1,142 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership.
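NthLastModifiedTimeTracker is effectively a fixed-size FIFO of timestamps: secondsSinceOldestModification() consumes the oldest entry while markAsModified() appends the current time. A minimal usage sketch mirroring how RollingCountBolt pairs the two calls on every emit cycle:

    NthLastModifiedTimeTracker tracker = new NthLastModifiedTimeTracker(5);
    // on every emit cycle:
    int actualWindowLengthInSeconds = tracker.secondsSinceOldestModification();
    tracker.markAsModified();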
The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.starter.tools;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import java.io.Serializable;
+import java.util.List;
+import org.apache.storm.tuple.Tuple;
+
+/**
+ * This class wraps an object and its associated count, including any additional data fields.
+ *

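+ * For instance, wrapping the word "java" seen 42 times, with one extra data field (values illustrative):
+ * <pre>{@code
+ * Rankable r = new RankableObjectWithFields("java", 42L, "source-A");
+ * }</pre>
+ *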
+ * This class can be used, for instance, to track the number of occurrences of an object in a Storm topology. + */ +public class RankableObjectWithFields implements Rankable, Serializable { + + private static final long serialVersionUID = -9102878650001058090L; + private static final String toStringSeparator = "|"; + + private final Object obj; + private final long count; + private final ImmutableList fields; + + public RankableObjectWithFields(Object obj, long count, Object... otherFields) { + if (obj == null) { + throw new IllegalArgumentException("The object must not be null"); + } + if (count < 0) { + throw new IllegalArgumentException("The count must be >= 0"); + } + this.obj = obj; + this.count = count; + fields = ImmutableList.copyOf(otherFields); + + } + + /** + * Construct a new instance based on the provided {@link Tuple}. + *

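+ * For example (values illustrative), given a tuple holding {@code ["java", 42L, "source-A"]}:
+ * <pre>{@code
+ * RankableObjectWithFields r = RankableObjectWithFields.from(tuple);
+ * r.getObject();  // "java"
+ * r.getCount();   // 42L
+ * r.getFields();  // ["source-A"]
+ * }</pre>
+ *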
+ * This method expects the object to be ranked in the first field (index 0) of the provided tuple, and the number of + * occurrences of the object (its count) in the second field (index 1). Any further fields in the tuple will be + * extracted and tracked, too. These fields can be accessed via {@link RankableObjectWithFields#getFields()}. + * + * @param tuple + * + * @return new instance based on the provided tuple + */ + public static RankableObjectWithFields from(Tuple tuple) { + List otherFields = Lists.newArrayList(tuple.getValues()); + Object obj = otherFields.remove(0); + Long count = (Long) otherFields.remove(0); + return new RankableObjectWithFields(obj, count, otherFields.toArray()); + } + + @Override + public Object getObject() { + return obj; + } + + @Override + public long getCount() { + return count; + } + + /** + * Get fields. + * @return an immutable list of any additional data fields of the object (may be empty but will never be null) + */ + public List getFields() { + return fields; + } + + @Override + public int compareTo(Rankable other) { + long delta = this.getCount() - other.getCount(); + if (delta > 0) { + return 1; + } else if (delta < 0) { + return -1; + } else { + return 0; + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof RankableObjectWithFields)) { + return false; + } + RankableObjectWithFields other = (RankableObjectWithFields) o; + return obj.equals(other.obj) && count == other.count; + } + + @Override + public int hashCode() { + int result = 17; + int countHash = (int) (count ^ (count >>> 32)); + result = 31 * result + countHash; + result = 31 * result + obj.hashCode(); + return result; + } + + @Override + public String toString() { + StringBuffer buf = new StringBuffer(); + buf.append("["); + buf.append(obj); + buf.append(toStringSeparator); + buf.append(count); + for (Object field : fields) { + buf.append(toStringSeparator); + buf.append(field); + } + buf.append("]"); + return buf.toString(); + } + + /** + * Note: We do not defensively copy the wrapped object and any accompanying fields. We do guarantee, however, + * do return a defensive (shallow) copy of the List object that is wrapping any accompanying fields. + */ + @Override + public Rankable copy() { + List shallowCopyOfFields = ImmutableList.copyOf(getFields()); + return new RankableObjectWithFields(getObject(), getCount(), shallowCopyOfFields); + } + +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/Rankings.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/Rankings.java new file mode 100644 index 00000000000..89bba59da90 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/Rankings.java @@ -0,0 +1,150 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.tools; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import java.io.Serializable; +import java.util.Collections; +import java.util.List; + +public class Rankings implements Serializable { + + private static final long serialVersionUID = -1549827195410578903L; + private static final int DEFAULT_COUNT = 10; + + private final int maxSize; + private final List rankedItems = Lists.newArrayList(); + + public Rankings() { + this(DEFAULT_COUNT); + } + + public Rankings(int topN) { + if (topN < 1) { + throw new IllegalArgumentException("topN must be >= 1"); + } + maxSize = topN; + } + + /** + * Copy constructor. + */ + public Rankings(Rankings other) { + this(other.maxSize()); + updateWith(other); + } + + /** + * Get max size. + * @return the maximum possible number (size) of ranked objects this instance can hold + */ + public int maxSize() { + return maxSize; + } + + /** + * Get size. + * @return the number (size) of ranked objects this instance is currently holding + */ + public int size() { + return rankedItems.size(); + } + + /** + * The returned defensive copy is only "somewhat" defensive. We do, for instance, return a defensive copy of the + * enclosing List instance, and we do try to defensively copy any contained Rankable objects, too. However, the + * contract of {@link org.apache.storm.starter.tools.Rankable#copy()} does not guarantee that any Object's embedded within + * a Rankable will be defensively copied, too. + * + * @return a somewhat defensive copy of ranked items + */ + public List getRankings() { + List copy = Lists.newLinkedList(); + for (Rankable r : rankedItems) { + copy.add(r.copy()); + } + return ImmutableList.copyOf(copy); + } + + public void updateWith(Rankings other) { + for (Rankable r : other.getRankings()) { + updateWith(r); + } + } + + public void updateWith(Rankable r) { + synchronized (rankedItems) { + addOrReplace(r); + rerank(); + shrinkRankingsIfNeeded(); + } + } + + private void addOrReplace(Rankable r) { + Integer rank = findRankOf(r); + if (rank != null) { + rankedItems.set(rank, r); + } else { + rankedItems.add(r); + } + } + + private Integer findRankOf(Rankable r) { + Object tag = r.getObject(); + for (int rank = 0; rank < rankedItems.size(); rank++) { + Object cur = rankedItems.get(rank).getObject(); + if (cur.equals(tag)) { + return rank; + } + } + return null; + } + + private void rerank() { + Collections.sort(rankedItems); + Collections.reverse(rankedItems); + } + + private void shrinkRankingsIfNeeded() { + if (rankedItems.size() > maxSize) { + rankedItems.remove(maxSize); + } + } + + /** + * Removes ranking entries that have a count of zero. + */ + public void pruneZeroCounts() { + int i = 0; + while (i < rankedItems.size()) { + if (rankedItems.get(i).getCount() == 0) { + rankedItems.remove(i); + } else { + i++; + } + } + } + + @Override + public String toString() { + return rankedItems.toString(); + } + + /** + * Creates a (defensive) copy of itself. 
+ */ + public Rankings copy() { + return new Rankings(this); + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/SlidingWindowCounter.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/SlidingWindowCounter.java new file mode 100644 index 00000000000..136d586abd4 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/SlidingWindowCounter.java @@ -0,0 +1,116 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.tools; + +import java.io.Serializable; +import java.util.Map; + + +/** + * This class counts objects in a sliding window fashion. + *

+ * It is designed 1) to give multiple "producer" threads write access to the counter, i.e. the ability to increment
+ * counts of objects, and 2) to give a single "consumer" thread (e.g. {@link org.apache.storm.starter.bolt.RollingCountBolt})
+ * read access to the counter. Whenever the consumer thread performs a read operation, this class advances the head slot
+ * of the sliding window. The consumer thread thereby indirectly controls where the producer threads' writes go. Note
+ * that the counter never advances the head slot on its own; it does so only as part of a consumer read.
+ *

+ * A note on analyzing data based on a sliding window count: during the initial {@code windowLengthInSlots}
+ * iterations, this sliding window counter will always return object counts that are equal to or greater than those of
+ * the previous iteration. This is the effect of the counter "loading up" at the very start of its existence.
+ * Conceptually, this is the desired behavior.
+ *

+ * To give an example, consider a counter with 5 slots which, for the sake of this example, represent 1 minute of time each: + *

+ * <pre>

+ * {@code
+ * Sliding window counts of an object X over time
+ *
+ * Minute (timeline):
+ * 1    2   3   4   5   6   7   8
+ *
+ * Observed counts per minute:
+ * 1    1   1   1   0   0   0   0
+ *
+ * Counts returned by counter:
+ * 1    2   3   4   4   3   2   1
+ * }
+ * </pre>
+ *

+ * As you can see in this example, for the first {@code windowLengthInSlots} iterations (here: the first five minutes)
+ * the counter will always return counts equal to or greater than those of the previous iteration (1, 2, 3, 4, 4). This
+ * initial load effect needs to be accounted for whenever you want to perform analyses such as trending topics;
+ * otherwise your analysis algorithm might falsely identify the object as trending, because the counter seems to
+ * observe continuously increasing counts. Also note that during the initial load phase every object will exhibit
+ * increasing counts.
+ *

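+ * In code, the basic write/read cycle looks as follows (a minimal sketch; the generic type and variable names are
+ * illustrative):
+ * <pre>{@code
+ * SlidingWindowCounter<String> counter = new SlidingWindowCounter<String>(5);
+ * counter.incrementCount("java");                                  // producer side
+ * Map<String, Long> counts = counter.getCountsThenAdvanceWindow(); // consumer side; advances the window
+ * }</pre>
+ *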
+ * At a high level, the counter exhibits the following behavior: if you asked the example counter after two minutes,
+ * "how often did you count the object during the past five minutes?", then it should reply "I have counted it 2 times
+ * in the past five minutes", implying that it can only account for the last two of those five minutes because the
+ * counter was not running before that time.
+ *
+ * @param <T> The type of those objects we want to count.
+ */
+public final class SlidingWindowCounter<T> implements Serializable {
+
+    private static final long serialVersionUID = -2645063988768785810L;
+
+    private SlotBasedCounter<T> objCounter;
+    private int headSlot;
+    private int tailSlot;
+    private int windowLengthInSlots;
+
+    public SlidingWindowCounter(int windowLengthInSlots) {
+        if (windowLengthInSlots < 2) {
+            throw new IllegalArgumentException(
+                "Window length in slots must be at least two (you requested " + windowLengthInSlots + ")");
+        }
+        this.windowLengthInSlots = windowLengthInSlots;
+        this.objCounter = new SlotBasedCounter<T>(this.windowLengthInSlots);
+
+        this.headSlot = 0;
+        this.tailSlot = slotAfter(headSlot);
+    }
+
+    public void incrementCount(T obj) {
+        objCounter.incrementCount(obj, headSlot);
+    }
+
+    /**
+     * Return the current (total) counts of all tracked objects, then advance the window.
+     *

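+     * For example (an illustrative sketch; {@code counter} is a five-slot instance as above):
+     * <pre>{@code
+     * counter.incrementCount("java");
+     * counter.getCountsThenAdvanceWindow(); // {"java"=1}; the head moves to the next slot
+     * counter.getCountsThenAdvanceWindow(); // still {"java"=1} until that slot falls out of the window
+     * }</pre>
+     *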
+     * Whenever this method is called, we consider the counts of the current sliding window to be available to and
+     * successfully processed "upstream" (i.e. by the caller). Knowing this, we will start counting any subsequent
+     * objects within the next "chunk" of the sliding window.
+     *
+     * @return The current (total) counts of all tracked objects.
+     */
+    public Map<T, Long> getCountsThenAdvanceWindow() {
+        @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance")
+        Map<T, Long> counts = objCounter.getCounts();
+        objCounter.wipeZeros();
+        objCounter.wipeSlot(tailSlot);
+        advanceHead();
+        return counts;
+    }
+
+    private void advanceHead() {
+        headSlot = tailSlot;
+        tailSlot = slotAfter(tailSlot);
+    }
+
+    private int slotAfter(int slot) {
+        return (slot + 1) % windowLengthInSlots;
+    }
+
+}
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/SlotBasedCounter.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/SlotBasedCounter.java
new file mode 100644
index 00000000000..6f48c4784ea
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/SlotBasedCounter.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.starter.tools;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ * This class provides per-slot counts of the occurrences of objects.
+ *

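+ * A minimal sketch of the per-slot bookkeeping (type parameter and values are illustrative):
+ * <pre>{@code
+ * SlotBasedCounter<String> counter = new SlotBasedCounter<String>(3); // 3 slots
+ * counter.incrementCount("java", 0);   // count an occurrence into slot 0
+ * counter.getCount("java", 0);         // 1
+ * counter.wipeSlot(0);                 // reset slot 0 for all tracked objects
+ * }</pre>
+ *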
+ * It can be used, for instance, as a building block for implementing sliding window counting of objects.
+ *
+ * @param <T> The type of those objects we want to count.
+ */
+public final class SlotBasedCounter<T> implements Serializable {
+
+    private static final long serialVersionUID = 4858185737378394432L;
+
+    private final Map<T, long[]> objToCounts = new HashMap<T, long[]>();
+    private final int numSlots;
+
+    public SlotBasedCounter(int numSlots) {
+        if (numSlots <= 0) {
+            throw new IllegalArgumentException("Number of slots must be greater than zero (you requested " + numSlots + ")");
+        }
+        this.numSlots = numSlots;
+    }
+
+    public void incrementCount(T obj, int slot) {
+        long[] counts = objToCounts.get(obj);
+        if (counts == null) {
+            counts = new long[this.numSlots];
+            objToCounts.put(obj, counts);
+        }
+        counts[slot]++;
+    }
+
+    public long getCount(T obj, int slot) {
+        long[] counts = objToCounts.get(obj);
+        if (counts == null) {
+            return 0;
+        } else {
+            return counts[slot];
+        }
+    }
+
+    public Map<T, Long> getCounts() {
+        Map<T, Long> result = new HashMap<T, Long>();
+        for (T obj : objToCounts.keySet()) {
+            result.put(obj, computeTotalCount(obj));
+        }
+        return result;
+    }
+
+    private long computeTotalCount(T obj) {
+        long[] curr = objToCounts.get(obj);
+        long total = 0;
+        for (long l : curr) {
+            total += l;
+        }
+        return total;
+    }
+
+    /**
+     * Reset the slot count of any tracked objects to zero for the given slot.
+     */
+    public void wipeSlot(int slot) {
+        for (T obj : objToCounts.keySet()) {
+            resetSlotCountToZero(obj, slot);
+        }
+    }
+
+    private void resetSlotCountToZero(T obj, int slot) {
+        long[] counts = objToCounts.get(obj);
+        counts[slot] = 0;
+    }
+
+    private boolean shouldBeRemovedFromCounter(T obj) {
+        return computeTotalCount(obj) == 0;
+    }
+
+    /**
+     * Remove any object from the counter whose total count is zero (to free up memory).
+     */
+    public void wipeZeros() {
+        for (Iterator<Map.Entry<T, long[]>> it = objToCounts.entrySet().iterator(); it.hasNext(); ) {
+            Map.Entry<T, long[]> entry = it.next();
+            if (shouldBeRemovedFromCounter(entry.getKey())) {
+                it.remove();
+            }
+        }
+    }
+
+}
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/DebugMemoryMapState.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/DebugMemoryMapState.java
new file mode 100644
index 00000000000..3a7aeb0e53a
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/DebugMemoryMapState.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.starter.trident;
+
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import org.apache.storm.task.IMetricsContext;
+import org.apache.storm.topology.FailedException;
+import org.apache.storm.trident.state.CombinerValueUpdater;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.state.StateFactory;
+import org.apache.storm.trident.state.ValueUpdater;
+import org.apache.storm.trident.testing.MemoryMapState;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class DebugMemoryMapState<T> extends MemoryMapState<T> {
+    private static final Logger LOG = LoggerFactory.getLogger(DebugMemoryMapState.class);
+
+    private int updateCount = 0;
+
+    public DebugMemoryMapState(String id) {
+        super(id);
+    }
+
+    @Override
+    public List<T> multiUpdate(List<List<Object>> keys, List<ValueUpdater> updaters) {
+        print(keys, updaters);
+        if ((updateCount++ % 5) == 0) {
+            LOG.error("Throwing FailedException");
+            throw new FailedException("Enforced State Update Fail. On retrial should replay the exact same batch.");
+        }
+        return super.multiUpdate(keys, updaters);
+    }
+
+    private void print(List<List<Object>> keys, List<ValueUpdater> updaters) {
+        for (int i = 0; i < keys.size(); i++) {
+            ValueUpdater valueUpdater = updaters.get(i);
+            Object arg = ((CombinerValueUpdater) valueUpdater).getArg();
+            LOG.info("updateCount = {}, keys = {} => updaterArgs = {}", updateCount, keys.get(i), arg);
+        }
+    }
+
+    public static class Factory implements StateFactory {
+        String id;
+
+        public Factory() {
+            id = UUID.randomUUID().toString();
+        }
+
+        @Override
+        public State makeState(Map<String, Object> conf, IMetricsContext metrics, int partitionIndex, int numPartitions) {
+            return new DebugMemoryMapState(id + partitionIndex);
+        }
+    }
+}
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMapExample.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMapExample.java
new file mode 100644
index 00000000000..067eeff3d5c
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMapExample.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */ + +package org.apache.storm.starter.trident; + +import java.util.ArrayList; +import java.util.List; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.generated.StormTopology; +import org.apache.storm.trident.TridentState; +import org.apache.storm.trident.TridentTopology; +import org.apache.storm.trident.operation.BaseFilter; +import org.apache.storm.trident.operation.Consumer; +import org.apache.storm.trident.operation.Filter; +import org.apache.storm.trident.operation.FlatMapFunction; +import org.apache.storm.trident.operation.MapFunction; +import org.apache.storm.trident.operation.builtin.Count; +import org.apache.storm.trident.operation.builtin.FilterNull; +import org.apache.storm.trident.operation.builtin.MapGet; +import org.apache.storm.trident.operation.builtin.Sum; +import org.apache.storm.trident.testing.FixedBatchSpout; +import org.apache.storm.trident.testing.MemoryMapState; +import org.apache.storm.trident.tuple.TridentTuple; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.DRPCClient; + +/** + * A simple example that demonstrates the usage of {@link org.apache.storm.trident.Stream#map(MapFunction)} and + * {@link org.apache.storm.trident.Stream#flatMap(FlatMapFunction)} functions. + */ +public class TridentMapExample { + + private static MapFunction toUpper = new MapFunction() { + @Override + public Values execute(TridentTuple input) { + return new Values(input.getStringByField("word").toUpperCase()); + } + }; + + private static FlatMapFunction split = new FlatMapFunction() { + @Override + public Iterable execute(TridentTuple input) { + List valuesList = new ArrayList<>(); + for (String word : input.getString(0).split(" ")) { + valuesList.add(new Values(word)); + } + return valuesList; + } + }; + + private static Filter theFilter = new BaseFilter() { + @Override + public boolean isKeep(TridentTuple tuple) { + return tuple.getString(0).equals("THE"); + } + }; + + public static StormTopology buildTopology() { + FixedBatchSpout spout = new FixedBatchSpout( + new Fields("word"), 3, new Values("the cow jumped over the moon"), + new Values("the man went to the store and bought some candy"), new Values("four score and seven years ago"), + new Values("how many apples can you eat"), new Values("to be or not to be the person")); + spout.setCycle(true); + + TridentTopology topology = new TridentTopology(); + TridentState wordCounts = topology.newStream("spout1", spout).parallelismHint(16) + .flatMap(split) + .map(toUpper, new Fields("uppercased")) + .filter(theFilter) + .peek(new Consumer() { + @Override + public void accept(TridentTuple input) { + System.out.println(input.getString(0)); + } + }) + .groupBy(new Fields("uppercased")) + .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count")) + .parallelismHint(16); + + topology.newDRPCStream("words") + .flatMap(split, new Fields("word")) + .groupBy(new Fields("word")) + .stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count")) + .filter(new FilterNull()) + .aggregate(new Fields("count"), new Sum(), new Fields("sum")); + return topology.build(); + } + + public static void main(String[] args) throws Exception { + Config conf = new Config(); + conf.setMaxSpoutPending(20); + String topoName = "wordCounter"; + if (args.length > 0) { + topoName = args[0]; + } + conf.setNumWorkers(3); + StormSubmitter.submitTopologyWithProgressBar(topoName, conf, buildTopology()); + try (DRPCClient drpc 
= DRPCClient.getConfiguredClient(conf)) { + for (int i = 0; i < 10; i++) { + System.out.println("DRPC RESULT: " + drpc.execute("words", "CAT THE DOG JUMPED")); + Thread.sleep(1000); + } + } + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfDevicesTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfDevicesTopology.java new file mode 100644 index 00000000000..5944408ee71 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfDevicesTopology.java @@ -0,0 +1,188 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.starter.trident;
+
+import java.io.Serializable;
+import java.util.Comparator;
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.starter.spout.RandomNumberGeneratorSpout;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.operation.builtin.Debug;
+import org.apache.storm.trident.testing.FixedBatchSpout;
+import org.apache.storm.trident.tuple.TridentTuple;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+
+/**
+ * This class demonstrates different usages of
+ * * {@link Stream#minBy(String)}
+ * * {@link Stream#maxBy(String)}
+ * operations on trident {@link Stream}.
+ */
+public class TridentMinMaxOfDevicesTopology {
+
+    /**
+     * Creates a topology with device-id and count (which are whole numbers) as tuple fields in a stream, and finally
+     * generates a result stream based on the min and max of the device-id and count values.
+     */
+    public static StormTopology buildDevicesTopology() {
+        String deviceId = "device-id";
+        String count = "count";
+        Fields allFields = new Fields(deviceId, count);
+
+        RandomNumberGeneratorSpout spout = new RandomNumberGeneratorSpout(allFields, 10, 1000);
+
+        TridentTopology topology = new TridentTopology();
+        Stream devicesStream = topology
+            .newStream("devicegen-spout", spout)
+            .each(allFields, new Debug("##### devices"));
+
+        devicesStream
+            .minBy(deviceId)
+            .each(allFields, new Debug("#### device with min id"));
+
+        devicesStream
+            .maxBy(count)
+            .each(allFields, new Debug("#### device with max count"));
+
+        return topology.build();
+    }
+
+    /**
+     * Creates a topology which demonstrates min/max operations on tuples of a stream that contains vehicle and driver fields
+     * with values {@link TridentMinMaxOfDevicesTopology.Vehicle} and {@link TridentMinMaxOfDevicesTopology.Driver} respectively.
+ */ + public static StormTopology buildVehiclesTopology() { + Fields driverField = new Fields(Driver.FIELD_NAME); + Fields vehicleField = new Fields(Vehicle.FIELD_NAME); + Fields allFields = new Fields(Vehicle.FIELD_NAME, Driver.FIELD_NAME); + + FixedBatchSpout spout = new FixedBatchSpout(allFields, 10, Vehicle.generateVehicles(20)); + spout.setCycle(true); + + TridentTopology topology = new TridentTopology(); + Stream vehiclesStream = topology + .newStream("spout1", spout) + .each(allFields, new Debug("##### vehicles")); + + Stream slowVehiclesStream = vehiclesStream + .min(new SpeedComparator()) + .each(vehicleField, new Debug("#### slowest vehicle")); + + Stream slowDriversStream = slowVehiclesStream + .project(driverField) + .each(driverField, new Debug("##### slowest driver")); + + vehiclesStream + .max(new SpeedComparator()) + .each(vehicleField, new Debug("#### fastest vehicle")) + .project(driverField) + .each(driverField, new Debug("##### fastest driver")); + + vehiclesStream + .max(new EfficiencyComparator()) + .each(vehicleField, new Debug("#### efficient vehicle")); + + return topology.build(); + } + + public static void main(String[] args) throws Exception { + + StormTopology topology = buildDevicesTopology(); + Config conf = new Config(); + conf.setMaxSpoutPending(20); + conf.setNumWorkers(3); + StormSubmitter.submitTopologyWithProgressBar("devices-topology", conf, topology); + } + + static class SpeedComparator implements Comparator, Serializable { + + @Override + public int compare(TridentTuple tuple1, TridentTuple tuple2) { + Vehicle vehicle1 = (Vehicle) tuple1.getValueByField(Vehicle.FIELD_NAME); + Vehicle vehicle2 = (Vehicle) tuple2.getValueByField(Vehicle.FIELD_NAME); + return Integer.compare(vehicle1.maxSpeed, vehicle2.maxSpeed); + } + } + + static class EfficiencyComparator implements Comparator, Serializable { + + @Override + public int compare(TridentTuple tuple1, TridentTuple tuple2) { + Vehicle vehicle1 = (Vehicle) tuple1.getValueByField(Vehicle.FIELD_NAME); + Vehicle vehicle2 = (Vehicle) tuple2.getValueByField(Vehicle.FIELD_NAME); + return Double.compare(vehicle1.efficiency, vehicle2.efficiency); + } + + } + + static class Driver implements Serializable { + static final String FIELD_NAME = "driver"; + final String name; + final int id; + + Driver(String name, int id) { + this.name = name; + this.id = id; + } + + @Override + public String toString() { + return "Driver{" + + "name='" + name + '\'' + + ", id=" + id + + '}'; + } + } + + static class Vehicle implements Serializable { + static final String FIELD_NAME = "vehicle"; + final String name; + final int maxSpeed; + final double efficiency; + + Vehicle(String name, int maxSpeed, double efficiency) { + this.name = name; + this.maxSpeed = maxSpeed; + this.efficiency = efficiency; + } + + public static List[] generateVehicles(int count) { + List[] vehicles = new List[count]; + for (int i = 0; i < count; i++) { + int id = i - 1; + vehicles[i] = + (new Values( + new Vehicle("Vehicle-" + id, ThreadLocalRandom.current().nextInt(0, 100), + ThreadLocalRandom.current().nextDouble(1, 5)), + new Driver("Driver-" + id, id) + )); + } + return vehicles; + } + + @Override + public String toString() { + return "Vehicle{" + + "name='" + name + '\'' + + ", maxSpeed=" + maxSpeed + + ", efficiency=" + efficiency + + '}'; + } + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfVehiclesTopology.java 
b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfVehiclesTopology.java new file mode 100644 index 00000000000..3c0ff31ac83 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfVehiclesTopology.java @@ -0,0 +1,166 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.trident; + +import java.io.Serializable; +import java.util.Comparator; +import java.util.List; +import java.util.concurrent.ThreadLocalRandom; +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.generated.StormTopology; +import org.apache.storm.trident.Stream; +import org.apache.storm.trident.TridentTopology; +import org.apache.storm.trident.operation.builtin.Debug; +import org.apache.storm.trident.testing.FixedBatchSpout; +import org.apache.storm.trident.tuple.TridentTuple; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Values; + +/** + * This class demonstrates different usages of + * * {@link Stream#minBy(String, Comparator)} + * * {@link Stream#min(Comparator)} + * * {@link Stream#maxBy(String, Comparator)} + * * {@link Stream#max(Comparator)} + * operations on trident {@link Stream}. + */ +public class TridentMinMaxOfVehiclesTopology { + + /** + * Creates a topology which demonstrates min/max operations on tuples of stream which contain vehicle and driver fields + * with values {@link TridentMinMaxOfVehiclesTopology.Vehicle} and {@link TridentMinMaxOfVehiclesTopology.Driver} respectively. + */ + public static StormTopology buildVehiclesTopology() { + Fields driverField = new Fields(Driver.FIELD_NAME); + Fields vehicleField = new Fields(Vehicle.FIELD_NAME); + Fields allFields = new Fields(Vehicle.FIELD_NAME, Driver.FIELD_NAME); + + FixedBatchSpout spout = new FixedBatchSpout(allFields, 10, Vehicle.generateVehicles(20)); + spout.setCycle(true); + + TridentTopology topology = new TridentTopology(); + Stream vehiclesStream = topology + .newStream("spout1", spout) + .each(allFields, new Debug("##### vehicles")); + + Stream slowVehiclesStream = + vehiclesStream + .min(new SpeedComparator()) + .each(vehicleField, new Debug("#### slowest vehicle")); + + Stream slowDriversStream = + slowVehiclesStream + .project(driverField) + .each(driverField, new Debug("##### slowest driver")); + + vehiclesStream + .max(new SpeedComparator()) + .each(vehicleField, new Debug("#### fastest vehicle")) + .project(driverField) + .each(driverField, new Debug("##### fastest driver")); + + vehiclesStream + .minBy(Vehicle.FIELD_NAME, new EfficiencyComparator()) + .each(vehicleField, new Debug("#### least efficient vehicle")); + + vehiclesStream + .maxBy(Vehicle.FIELD_NAME, new EfficiencyComparator()) + .each(vehicleField, new Debug("#### most efficient vehicle")); + + return topology.build(); + } + + public static void main(String[] args) throws Exception { + + StormTopology topology = buildVehiclesTopology(); + Config conf = new Config(); + conf.setMaxSpoutPending(20); + conf.setNumWorkers(3); + StormSubmitter.submitTopologyWithProgressBar("vehicles-topology", conf, topology); + } + + static class SpeedComparator implements Comparator, Serializable { + + @Override + public int compare(TridentTuple tuple1, TridentTuple tuple2) { + Vehicle vehicle1 = (Vehicle) tuple1.getValueByField(Vehicle.FIELD_NAME); + Vehicle vehicle2 = (Vehicle) tuple2.getValueByField(Vehicle.FIELD_NAME); + return Integer.compare(vehicle1.maxSpeed, vehicle2.maxSpeed); + } + } + + static class EfficiencyComparator implements 
Comparator, Serializable { + + @Override + public int compare(Vehicle vehicle1, Vehicle vehicle2) { + return Double.compare(vehicle1.efficiency, vehicle2.efficiency); + } + + } + + static class Driver implements Serializable { + static final String FIELD_NAME = "driver"; + final String name; + final int id; + + Driver(String name, int id) { + this.name = name; + this.id = id; + } + + @Override + public String toString() { + return "Driver{" + + "name='" + name + '\'' + + ", id=" + id + + '}'; + } + } + + static class Vehicle implements Serializable { + static final String FIELD_NAME = "vehicle"; + final String name; + final int maxSpeed; + final double efficiency; + + Vehicle(String name, int maxSpeed, double efficiency) { + this.name = name; + this.maxSpeed = maxSpeed; + this.efficiency = efficiency; + } + + public static List[] generateVehicles(int count) { + List[] vehicles = new List[count]; + for (int i = 0; i < count; i++) { + int id = i - 1; + vehicles[i] = + (new Values( + new Vehicle("Vehicle-" + id, ThreadLocalRandom.current().nextInt(0, 100), + ThreadLocalRandom.current().nextDouble(1, 5)), + new Driver("Driver-" + id, id) + )); + } + return vehicles; + } + + @Override + public String toString() { + return "Vehicle{" + + "name='" + name + '\'' + + ", maxSpeed=" + maxSpeed + + ", efficiency=" + efficiency + + '}'; + } + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentReach.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentReach.java new file mode 100644 index 00000000000..a159a3eff30 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentReach.java @@ -0,0 +1,152 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */
+
+package org.apache.storm.starter.trident;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.task.IMetricsContext;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.operation.BaseFunction;
+import org.apache.storm.trident.operation.CombinerAggregator;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.operation.builtin.MapGet;
+import org.apache.storm.trident.operation.builtin.Sum;
+import org.apache.storm.trident.state.ReadOnlyState;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.state.StateFactory;
+import org.apache.storm.trident.state.map.ReadOnlyMapState;
+import org.apache.storm.trident.tuple.TridentTuple;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.DRPCClient;
+
+public class TridentReach {
+    public static Map<String, List<String>> TWEETERS_DB = new HashMap<String, List<String>>() {
+        {
+            put("foo.com/blog/1", Arrays.asList("sally", "bob", "tim", "george", "nathan"));
+            put("engineering.twitter.com/blog/5", Arrays.asList("adam", "david", "sally", "nathan"));
+            put("tech.backtype.com/blog/123", Arrays.asList("tim", "mike", "john"));
+        }
+    };
+
+    public static Map<String, List<String>> FOLLOWERS_DB = new HashMap<String, List<String>>() {
+        {
+            put("sally", Arrays.asList("bob", "tim", "alice", "adam", "jim", "chris", "jai"));
+            put("bob", Arrays.asList("sally", "nathan", "jim", "mary", "david", "vivian"));
+            put("tim", Arrays.asList("alex"));
+            put("nathan", Arrays.asList("sally", "bob", "adam", "harry", "chris", "vivian", "emily", "jordan"));
+            put("adam", Arrays.asList("david", "carissa"));
+            put("mike", Arrays.asList("john", "bob"));
+            put("john", Arrays.asList("alice", "nathan", "jim", "mike", "bob"));
+        }
+    };
+
+    public static StormTopology buildTopology() {
+        TridentTopology topology = new TridentTopology();
+        TridentState urlToTweeters = topology.newStaticState(new StaticSingleKeyMapState.Factory(TWEETERS_DB));
+        TridentState tweetersToFollowers = topology.newStaticState(new StaticSingleKeyMapState.Factory(FOLLOWERS_DB));
+
+        topology.newDRPCStream("reach")
+                .stateQuery(urlToTweeters, new Fields("args"), new MapGet(), new Fields("tweeters"))
+                .each(new Fields("tweeters"), new ExpandList(), new Fields("tweeter"))
+                .shuffle()
+                .stateQuery(tweetersToFollowers, new Fields("tweeter"), new MapGet(), new Fields("followers"))
+                .each(new Fields("followers"), new ExpandList(), new Fields("follower"))
+                .groupBy(new Fields("follower"))
+                .aggregate(new One(), new Fields("one"))
+                .aggregate(new Fields("one"), new Sum(), new Fields("reach"));
+        return topology.build();
+    }
+
+    public static void main(String[] args) throws Exception {
+        Config conf = new Config();
+        StormSubmitter.submitTopology("reach", conf, buildTopology());
+        try (DRPCClient drpc = DRPCClient.getConfiguredClient(conf)) {
+            Thread.sleep(2000);
+
+            System.out.println("REACH: " + drpc.execute("reach", "aaa"));
+            System.out.println("REACH: " + drpc.execute("reach", "foo.com/blog/1"));
+            System.out.println("REACH: " + drpc.execute("reach", "engineering.twitter.com/blog/5"));
+        }
+    }
+
+    public static class StaticSingleKeyMapState extends ReadOnlyState implements ReadOnlyMapState<Object> {
+        Map map;
+
+        public StaticSingleKeyMapState(Map map) {
+            this.map = map;
+        }
+
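+        /**
+         * Looks up one value per input key. Each key arrives as a list of field values; only its first element is
+         * used as the map key, hence "single key" state.
+         */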
+        @Override
+        public List<Object> multiGet(List<List<Object>> keys) {
+            List<Object> ret = new ArrayList<Object>();
+            for (List<Object> key : keys) {
+                Object singleKey = key.get(0);
+                ret.add(map.get(singleKey));
+            }
+            return ret;
+        }
+
+        public static class Factory implements StateFactory {
+            Map map;
+
+            public Factory(Map map) {
+                this.map = map;
+            }
+
+            @Override
+            public State makeState(Map<String, Object> conf, IMetricsContext metrics, int partitionIndex, int numPartitions) {
+                return new StaticSingleKeyMapState(map);
+            }
+
+        }
+
+    }
+
+    public static class One implements CombinerAggregator<Integer> {
+        @Override
+        public Integer init(TridentTuple tuple) {
+            return 1;
+        }
+
+        @Override
+        public Integer combine(Integer val1, Integer val2) {
+            return 1;
+        }
+
+        @Override
+        public Integer zero() {
+            return 1;
+        }
+    }
+
+    public static class ExpandList extends BaseFunction {
+
+        @Override
+        public void execute(TridentTuple tuple, TridentCollector collector) {
+            List l = (List) tuple.getValue(0);
+            if (l != null) {
+                for (Object o : l) {
+                    collector.emit(new Values(o));
+                }
+            }
+        }
+
+    }
+}
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentWindowingInmemoryStoreTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentWindowingInmemoryStoreTopology.java
new file mode 100644
index 00000000000..862d09a237d
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentWindowingInmemoryStoreTopology.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.trident; + +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.generated.StormTopology; +import org.apache.storm.trident.Stream; +import org.apache.storm.trident.TridentTopology; +import org.apache.storm.trident.operation.Consumer; +import org.apache.storm.trident.testing.CountAsAggregator; +import org.apache.storm.trident.testing.FixedBatchSpout; +import org.apache.storm.trident.testing.Split; +import org.apache.storm.trident.tuple.TridentTuple; +import org.apache.storm.trident.windowing.InMemoryWindowsStoreFactory; +import org.apache.storm.trident.windowing.WindowsStoreFactory; +import org.apache.storm.trident.windowing.config.SlidingCountWindow; +import org.apache.storm.trident.windowing.config.WindowConfig; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Values; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Sample application of trident windowing which uses inmemory store for storing tuples in window. + */ +public class TridentWindowingInmemoryStoreTopology { + private static final Logger LOG = LoggerFactory.getLogger(TridentWindowingInmemoryStoreTopology.class); + + public static StormTopology buildTopology(WindowsStoreFactory windowStore, WindowConfig windowConfig) throws Exception { + FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3, new Values("the cow jumped over the moon"), + new Values("the man went to the store and bought some candy"), + new Values("four score and seven years ago"), + new Values("how many apples can you eat"), new Values("to be or not to be the person")); + spout.setCycle(true); + + TridentTopology topology = new TridentTopology(); + + Stream stream = topology.newStream("spout1", spout).parallelismHint(16).each(new Fields("sentence"), + new Split(), new Fields("word")) + .window(windowConfig, windowStore, new Fields("word"), new CountAsAggregator(), new Fields("count")) + .peek(new Consumer() { + @Override + public void accept(TridentTuple input) { + LOG.info("Received tuple: [{}]", input); + } + }); + + return topology.build(); + } + + public static void main(String[] args) throws Exception { + Config conf = new Config(); + WindowsStoreFactory mapState = new InMemoryWindowsStoreFactory(); + String topoName = "wordCounter"; + if (args.length > 0) { + topoName = args[0]; + } + + conf.setNumWorkers(3); + StormSubmitter.submitTopologyWithProgressBar(topoName, conf, buildTopology(mapState, SlidingCountWindow.of(1000, 100))); + } +} diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentWordCount.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentWordCount.java new file mode 100644 index 00000000000..bafeba29c24 --- /dev/null +++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentWordCount.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.trident; + +import org.apache.storm.Config; +import org.apache.storm.StormSubmitter; +import org.apache.storm.generated.StormTopology; +import org.apache.storm.trident.TridentState; +import org.apache.storm.trident.TridentTopology; +import org.apache.storm.trident.operation.BaseFunction; +import org.apache.storm.trident.operation.TridentCollector; +import org.apache.storm.trident.operation.builtin.Count; +import org.apache.storm.trident.operation.builtin.FilterNull; +import org.apache.storm.trident.operation.builtin.MapGet; +import org.apache.storm.trident.testing.FixedBatchSpout; +import org.apache.storm.trident.testing.MemoryMapState; +import org.apache.storm.trident.tuple.TridentTuple; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.DRPCClient; + + +public class TridentWordCount { + public static StormTopology buildTopology() { + FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3, new Values("the cow jumped over the moon"), + new Values("the man went to the store and bought some candy"), + new Values("four score and seven years ago"), + new Values("how many apples can you eat"), new Values("to be or not to be the person")); + spout.setCycle(true); + + TridentTopology topology = new TridentTopology(); + TridentState wordCounts = topology.newStream("spout1", spout).parallelismHint(16).each(new Fields("sentence"), + new Split(), new Fields("word")) + .groupBy(new Fields("word")).persistentAggregate(new MemoryMapState.Factory(), + new Count(), new Fields("count")) + .parallelismHint(16); + + topology.newDRPCStream("words").each(new Fields("args"), new Split(), new Fields("word")) + .groupBy(new Fields("word")) + .stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count")) + .each(new Fields("count"), new FilterNull()) + .project(new Fields("word", "count")); + return topology.build(); + } + + public static void main(String[] args) throws Exception { + Config conf = new Config(); + conf.setMaxSpoutPending(20); + String topoName = "wordCounter"; + if (args.length > 0) { + topoName = args[0]; + } + conf.setNumWorkers(3); + StormSubmitter.submitTopologyWithProgressBar(topoName, conf, buildTopology()); + try (DRPCClient drpc = DRPCClient.getConfiguredClient(conf)) { + for (int i = 0; i < 10; i++) { + System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped")); + Thread.sleep(1000); + } + } + } + + public static class Split extends BaseFunction { + @Override + public void execute(TridentTuple tuple, TridentCollector collector) { + String sentence = tuple.getString(0); + for (String word : sentence.split(" ")) { + collector.emit(new Values(word)); + } + } + } +} diff --git a/examples/storm-starter/src/jvm/storm/starter/BasicDRPCTopology.java b/examples/storm-starter/src/jvm/storm/starter/BasicDRPCTopology.java deleted file mode 100644 index 1b29738f03a..00000000000 --- 
a/examples/storm-starter/src/jvm/storm/starter/BasicDRPCTopology.java +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter; - -import backtype.storm.Config; -import backtype.storm.LocalCluster; -import backtype.storm.LocalDRPC; -import backtype.storm.StormSubmitter; -import backtype.storm.drpc.LinearDRPCTopologyBuilder; -import backtype.storm.topology.BasicOutputCollector; -import backtype.storm.topology.OutputFieldsDeclarer; -import backtype.storm.topology.base.BaseBasicBolt; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Tuple; -import backtype.storm.tuple.Values; - -/** - * This topology is a basic example of doing distributed RPC on top of Storm. It implements a function that appends a - * "!" to any string you send the DRPC function. - *

- * See https://github.com/nathanmarz/storm/wiki/Distributed-RPC for more information on doing distributed RPC on top of - * Storm. - */ -public class BasicDRPCTopology { - public static class ExclaimBolt extends BaseBasicBolt { - @Override - public void execute(Tuple tuple, BasicOutputCollector collector) { - String input = tuple.getString(1); - collector.emit(new Values(tuple.getValue(0), input + "!")); - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("id", "result")); - } - - } - - public static void main(String[] args) throws Exception { - LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("exclamation"); - builder.addBolt(new ExclaimBolt(), 3); - - Config conf = new Config(); - - if (args == null || args.length == 0) { - LocalDRPC drpc = new LocalDRPC(); - LocalCluster cluster = new LocalCluster(); - - cluster.submitTopology("drpc-demo", conf, builder.createLocalTopology(drpc)); - - for (String word : new String[]{ "hello", "goodbye" }) { - System.out.println("Result for \"" + word + "\": " + drpc.execute("exclamation", word)); - } - - cluster.shutdown(); - drpc.shutdown(); - } - else { - conf.setNumWorkers(3); - StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology()); - } - } -} diff --git a/examples/storm-starter/src/jvm/storm/starter/ExclamationTopology.java b/examples/storm-starter/src/jvm/storm/starter/ExclamationTopology.java deleted file mode 100644 index d7b1b3e4232..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/ExclamationTopology.java +++ /dev/null @@ -1,87 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter; - -import backtype.storm.Config; -import backtype.storm.LocalCluster; -import backtype.storm.StormSubmitter; -import backtype.storm.task.OutputCollector; -import backtype.storm.task.TopologyContext; -import backtype.storm.testing.TestWordSpout; -import backtype.storm.topology.OutputFieldsDeclarer; -import backtype.storm.topology.TopologyBuilder; -import backtype.storm.topology.base.BaseRichBolt; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Tuple; -import backtype.storm.tuple.Values; -import backtype.storm.utils.Utils; - -import java.util.Map; - -/** - * This is a basic example of a Storm topology. 
- */ -public class ExclamationTopology { - - public static class ExclamationBolt extends BaseRichBolt { - OutputCollector _collector; - - @Override - public void prepare(Map conf, TopologyContext context, OutputCollector collector) { - _collector = collector; - } - - @Override - public void execute(Tuple tuple) { - _collector.emit(tuple, new Values(tuple.getString(0) + "!!!")); - _collector.ack(tuple); - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("word")); - } - - - } - - public static void main(String[] args) throws Exception { - TopologyBuilder builder = new TopologyBuilder(); - - builder.setSpout("word", new TestWordSpout(), 10); - builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word"); - builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1"); - - Config conf = new Config(); - conf.setDebug(true); - - if (args != null && args.length > 0) { - conf.setNumWorkers(3); - - StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology()); - } - else { - - LocalCluster cluster = new LocalCluster(); - cluster.submitTopology("test", conf, builder.createTopology()); - Utils.sleep(10000); - cluster.killTopology("test"); - cluster.shutdown(); - } - } -} diff --git a/examples/storm-starter/src/jvm/storm/starter/ManualDRPC.java b/examples/storm-starter/src/jvm/storm/starter/ManualDRPC.java deleted file mode 100644 index fe0bae279d9..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/ManualDRPC.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package storm.starter; - -import backtype.storm.Config; -import backtype.storm.LocalCluster; -import backtype.storm.LocalDRPC; -import backtype.storm.drpc.DRPCSpout; -import backtype.storm.drpc.ReturnResults; -import backtype.storm.topology.BasicOutputCollector; -import backtype.storm.topology.OutputFieldsDeclarer; -import backtype.storm.topology.TopologyBuilder; -import backtype.storm.topology.base.BaseBasicBolt; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Tuple; -import backtype.storm.tuple.Values; - - -public class ManualDRPC { - public static class ExclamationBolt extends BaseBasicBolt { - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("result", "return-info")); - } - - @Override - public void execute(Tuple tuple, BasicOutputCollector collector) { - String arg = tuple.getString(0); - Object retInfo = tuple.getValue(1); - collector.emit(new Values(arg + "!!!", retInfo)); - } - - } - - public static void main(String[] args) { - TopologyBuilder builder = new TopologyBuilder(); - LocalDRPC drpc = new LocalDRPC(); - - DRPCSpout spout = new DRPCSpout("exclamation", drpc); - builder.setSpout("drpc", spout); - builder.setBolt("exclaim", new ExclamationBolt(), 3).shuffleGrouping("drpc"); - builder.setBolt("return", new ReturnResults(), 3).shuffleGrouping("exclaim"); - - LocalCluster cluster = new LocalCluster(); - Config conf = new Config(); - cluster.submitTopology("exclaim", conf, builder.createTopology()); - - System.out.println(drpc.execute("exclamation", "aaa")); - System.out.println(drpc.execute("exclamation", "bbb")); - - } -} diff --git a/examples/storm-starter/src/jvm/storm/starter/PrintSampleStream.java b/examples/storm-starter/src/jvm/storm/starter/PrintSampleStream.java deleted file mode 100644 index 2734fffac31..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/PrintSampleStream.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
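A note on PrintSampleStream below: the spout is registered under the id "spoutId" while the printer bolt subscribes to "spout", so as written the topology should fail validation before it runs. A sketch of consistent wiring:

    // Register and subscribe under the same component id.
    builder.setSpout("spout", new TwitterSampleSpout(consumerKey, consumerSecret,
            accessToken, accessTokenSecret, keyWords));
    builder.setBolt("print", new PrinterBolt()).shuffleGrouping("spout");
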
- */ - -package storm.starter; - -import java.util.Arrays; - -import backtype.storm.Config; -import backtype.storm.LocalCluster; -import backtype.storm.topology.TopologyBuilder; -import backtype.storm.utils.Utils; - -import storm.starter.bolt.PrinterBolt; -import storm.starter.spout.TwitterSampleSpout; - -public class PrintSampleStream { - public static void main(String[] args) { - String consumerKey = args[0]; - String consumerSecret = args[1]; - String accessToken = args[2]; - String accessTokenSecret = args[3]; - String[] arguments = args.clone(); - String[] keyWords = Arrays.copyOfRange(arguments, 4, arguments.length); - - TopologyBuilder builder = new TopologyBuilder(); - - builder.setSpout("spoutId", new TwitterSampleSpout(consumerKey, consumerSecret, - accessToken, accessTokenSecret, keyWords)); - builder.setBolt("print", new PrinterBolt()) - .shuffleGrouping("spout"); - - - Config conf = new Config(); - - - LocalCluster cluster = new LocalCluster(); - - cluster.submitTopology("test", conf, builder.createTopology()); - - Utils.sleep(10000); - cluster.shutdown(); - } -} diff --git a/examples/storm-starter/src/jvm/storm/starter/ReachTopology.java b/examples/storm-starter/src/jvm/storm/starter/ReachTopology.java deleted file mode 100644 index 2c5c8ba4544..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/ReachTopology.java +++ /dev/null @@ -1,196 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter; - -import backtype.storm.Config; -import backtype.storm.LocalCluster; -import backtype.storm.LocalDRPC; -import backtype.storm.StormSubmitter; -import backtype.storm.coordination.BatchOutputCollector; -import backtype.storm.drpc.LinearDRPCTopologyBuilder; -import backtype.storm.task.TopologyContext; -import backtype.storm.topology.BasicOutputCollector; -import backtype.storm.topology.OutputFieldsDeclarer; -import backtype.storm.topology.base.BaseBasicBolt; -import backtype.storm.topology.base.BaseBatchBolt; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Tuple; -import backtype.storm.tuple.Values; - -import java.util.*; - -/** - * This is a good example of doing complex Distributed RPC on top of Storm. This program creates a topology that can - * compute the reach for any URL on Twitter in realtime by parallelizing the whole computation. - *
<p/>
- * Reach is the number of unique people exposed to a URL on Twitter. To compute reach, you have to get all the people - * who tweeted the URL, get all the followers of all those people, unique that set of followers, and then count the - * unique set. It's an intense computation that can involve thousands of database calls and tens of millions of follower - * records. - *
<p/>
- * This Storm topology does every piece of that computation in parallel, turning what would be a computation that takes - * minutes on a single machine into one that takes just a couple seconds. - *
<p/>
- * For the purposes of demonstration, this topology replaces the use of actual DBs with in-memory hashmaps. - *
<p/>
- * See https://github.com/nathanmarz/storm/wiki/Distributed-RPC for more information on Distributed RPC. - */ -public class ReachTopology { - public static Map> TWEETERS_DB = new HashMap>() {{ - put("foo.com/blog/1", Arrays.asList("sally", "bob", "tim", "george", "nathan")); - put("engineering.twitter.com/blog/5", Arrays.asList("adam", "david", "sally", "nathan")); - put("tech.backtype.com/blog/123", Arrays.asList("tim", "mike", "john")); - }}; - - public static Map> FOLLOWERS_DB = new HashMap>() {{ - put("sally", Arrays.asList("bob", "tim", "alice", "adam", "jim", "chris", "jai")); - put("bob", Arrays.asList("sally", "nathan", "jim", "mary", "david", "vivian")); - put("tim", Arrays.asList("alex")); - put("nathan", Arrays.asList("sally", "bob", "adam", "harry", "chris", "vivian", "emily", "jordan")); - put("adam", Arrays.asList("david", "carissa")); - put("mike", Arrays.asList("john", "bob")); - put("john", Arrays.asList("alice", "nathan", "jim", "mike", "bob")); - }}; - - public static class GetTweeters extends BaseBasicBolt { - @Override - public void execute(Tuple tuple, BasicOutputCollector collector) { - Object id = tuple.getValue(0); - String url = tuple.getString(1); - List tweeters = TWEETERS_DB.get(url); - if (tweeters != null) { - for (String tweeter : tweeters) { - collector.emit(new Values(id, tweeter)); - } - } - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("id", "tweeter")); - } - } - - public static class GetFollowers extends BaseBasicBolt { - @Override - public void execute(Tuple tuple, BasicOutputCollector collector) { - Object id = tuple.getValue(0); - String tweeter = tuple.getString(1); - List followers = FOLLOWERS_DB.get(tweeter); - if (followers != null) { - for (String follower : followers) { - collector.emit(new Values(id, follower)); - } - } - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("id", "follower")); - } - } - - public static class PartialUniquer extends BaseBatchBolt { - BatchOutputCollector _collector; - Object _id; - Set _followers = new HashSet(); - - @Override - public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) { - _collector = collector; - _id = id; - } - - @Override - public void execute(Tuple tuple) { - _followers.add(tuple.getString(1)); - } - - @Override - public void finishBatch() { - _collector.emit(new Values(_id, _followers.size())); - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("id", "partial-count")); - } - } - - public static class CountAggregator extends BaseBatchBolt { - BatchOutputCollector _collector; - Object _id; - int _count = 0; - - @Override - public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) { - _collector = collector; - _id = id; - } - - @Override - public void execute(Tuple tuple) { - _count += tuple.getInteger(1); - } - - @Override - public void finishBatch() { - _collector.emit(new Values(_id, _count)); - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("id", "reach")); - } - } - - public static LinearDRPCTopologyBuilder construct() { - LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("reach"); - builder.addBolt(new GetTweeters(), 4); - builder.addBolt(new GetFollowers(), 12).shuffleGrouping(); - builder.addBolt(new PartialUniquer(), 
6).fieldsGrouping(new Fields("id", "follower")); - builder.addBolt(new CountAggregator(), 3).fieldsGrouping(new Fields("id")); - return builder; - } - - public static void main(String[] args) throws Exception { - LinearDRPCTopologyBuilder builder = construct(); - - - Config conf = new Config(); - - if (args == null || args.length == 0) { - conf.setMaxTaskParallelism(3); - LocalDRPC drpc = new LocalDRPC(); - LocalCluster cluster = new LocalCluster(); - cluster.submitTopology("reach-drpc", conf, builder.createLocalTopology(drpc)); - - String[] urlsToTry = new String[]{ "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" }; - for (String url : urlsToTry) { - System.out.println("Reach of " + url + ": " + drpc.execute("reach", url)); - } - - cluster.shutdown(); - drpc.shutdown(); - } - else { - conf.setNumWorkers(6); - StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology()); - } - } -} diff --git a/examples/storm-starter/src/jvm/storm/starter/RollingTopWords.java b/examples/storm-starter/src/jvm/storm/starter/RollingTopWords.java deleted file mode 100644 index 2630557db84..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/RollingTopWords.java +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter; - -import backtype.storm.Config; -import backtype.storm.testing.TestWordSpout; -import backtype.storm.topology.TopologyBuilder; -import backtype.storm.tuple.Fields; -import storm.starter.bolt.IntermediateRankingsBolt; -import storm.starter.bolt.RollingCountBolt; -import storm.starter.bolt.TotalRankingsBolt; -import storm.starter.util.StormRunner; - -/** - * This topology does a continuous computation of the top N words that the topology has seen in terms of cardinality. - * The top N computation is done in a completely scalable way, and a similar approach could be used to compute things - * like trending topics or trending images on Twitter. 
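A note on ReachTopology above, before the RollingTopWords class: the in-memory mock DBs make the expected answer easy to verify by hand.

    // Worked example from the mock DBs: "foo.com/blog/1" is tweeted by
    // sally, bob, tim, george and nathan. Their follower lists hold
    // 7 + 6 + 1 + 0 + 8 = 22 entries (george has none), of which 16 are
    // distinct, so drpc.execute("reach", "foo.com/blog/1") should report 16.
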
- */ -public class RollingTopWords { - - private static final int DEFAULT_RUNTIME_IN_SECONDS = 60; - private static final int TOP_N = 5; - - private final TopologyBuilder builder; - private final String topologyName; - private final Config topologyConfig; - private final int runtimeInSeconds; - - public RollingTopWords() throws InterruptedException { - builder = new TopologyBuilder(); - topologyName = "slidingWindowCounts"; - topologyConfig = createTopologyConfiguration(); - runtimeInSeconds = DEFAULT_RUNTIME_IN_SECONDS; - - wireTopology(); - } - - private static Config createTopologyConfiguration() { - Config conf = new Config(); - conf.setDebug(true); - return conf; - } - - private void wireTopology() throws InterruptedException { - String spoutId = "wordGenerator"; - String counterId = "counter"; - String intermediateRankerId = "intermediateRanker"; - String totalRankerId = "finalRanker"; - builder.setSpout(spoutId, new TestWordSpout(), 5); - builder.setBolt(counterId, new RollingCountBolt(9, 3), 4).fieldsGrouping(spoutId, new Fields("word")); - builder.setBolt(intermediateRankerId, new IntermediateRankingsBolt(TOP_N), 4).fieldsGrouping(counterId, new Fields( - "obj")); - builder.setBolt(totalRankerId, new TotalRankingsBolt(TOP_N)).globalGrouping(intermediateRankerId); - } - - public void run() throws InterruptedException { - StormRunner.runTopologyLocally(builder.createTopology(), topologyName, topologyConfig, runtimeInSeconds); - } - - public static void main(String[] args) throws Exception { - new RollingTopWords().run(); - } -} diff --git a/examples/storm-starter/src/jvm/storm/starter/SingleJoinExample.java b/examples/storm-starter/src/jvm/storm/starter/SingleJoinExample.java deleted file mode 100644 index cb1d98c3a72..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/SingleJoinExample.java +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
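A note on SingleJoinExample below: both spouts declare an "id" field, and SingleJoinBolt joins on exactly the fields the two sources share. A sketch of one matching pair:

    // Both tuples carry id 0, so once both sides arrive the join bolt
    // emits a single ("male", 20) tuple for that id.
    genderSpout.feed(new Values(0, "male"));
    ageSpout.feed(new Values(0, 20));
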
- */ -package storm.starter; - -import backtype.storm.Config; -import backtype.storm.LocalCluster; -import backtype.storm.testing.FeederSpout; -import backtype.storm.topology.TopologyBuilder; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Values; -import backtype.storm.utils.Utils; -import storm.starter.bolt.SingleJoinBolt; - -public class SingleJoinExample { - public static void main(String[] args) { - FeederSpout genderSpout = new FeederSpout(new Fields("id", "gender")); - FeederSpout ageSpout = new FeederSpout(new Fields("id", "age")); - - TopologyBuilder builder = new TopologyBuilder(); - builder.setSpout("gender", genderSpout); - builder.setSpout("age", ageSpout); - builder.setBolt("join", new SingleJoinBolt(new Fields("gender", "age"))).fieldsGrouping("gender", new Fields("id")) - .fieldsGrouping("age", new Fields("id")); - - Config conf = new Config(); - conf.setDebug(true); - - LocalCluster cluster = new LocalCluster(); - cluster.submitTopology("join-example", conf, builder.createTopology()); - - for (int i = 0; i < 10; i++) { - String gender; - if (i % 2 == 0) { - gender = "male"; - } - else { - gender = "female"; - } - genderSpout.feed(new Values(i, gender)); - } - - for (int i = 9; i >= 0; i--) { - ageSpout.feed(new Values(i, i + 20)); - } - - Utils.sleep(2000); - cluster.shutdown(); - } -} diff --git a/examples/storm-starter/src/jvm/storm/starter/TransactionalGlobalCount.java b/examples/storm-starter/src/jvm/storm/starter/TransactionalGlobalCount.java deleted file mode 100644 index d8ff78dff2a..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/TransactionalGlobalCount.java +++ /dev/null @@ -1,173 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter; - -import backtype.storm.Config; -import backtype.storm.LocalCluster; -import backtype.storm.coordination.BatchOutputCollector; -import backtype.storm.task.TopologyContext; -import backtype.storm.testing.MemoryTransactionalSpout; -import backtype.storm.topology.OutputFieldsDeclarer; -import backtype.storm.topology.base.BaseBatchBolt; -import backtype.storm.topology.base.BaseTransactionalBolt; -import backtype.storm.transactional.ICommitter; -import backtype.storm.transactional.TransactionAttempt; -import backtype.storm.transactional.TransactionalTopologyBuilder; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Tuple; -import backtype.storm.tuple.Values; - -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * This is a basic example of a transactional topology. It keeps a count of the number of tuples seen so far in a - * database. 
The source of data and the databases are mocked out as in memory maps for demonstration purposes. This - * class is defined in depth on the wiki at https://github.com/nathanmarz/storm/wiki/Transactional-topologies - */ -public class TransactionalGlobalCount { - public static final int PARTITION_TAKE_PER_BATCH = 3; - public static final Map>> DATA = new HashMap>>() {{ - put(0, new ArrayList>() {{ - add(new Values("cat")); - add(new Values("dog")); - add(new Values("chicken")); - add(new Values("cat")); - add(new Values("dog")); - add(new Values("apple")); - }}); - put(1, new ArrayList>() {{ - add(new Values("cat")); - add(new Values("dog")); - add(new Values("apple")); - add(new Values("banana")); - }}); - put(2, new ArrayList>() {{ - add(new Values("cat")); - add(new Values("cat")); - add(new Values("cat")); - add(new Values("cat")); - add(new Values("cat")); - add(new Values("dog")); - add(new Values("dog")); - add(new Values("dog")); - add(new Values("dog")); - }}); - }}; - - public static class Value { - int count = 0; - BigInteger txid; - } - - public static Map DATABASE = new HashMap(); - public static final String GLOBAL_COUNT_KEY = "GLOBAL-COUNT"; - - public static class BatchCount extends BaseBatchBolt { - Object _id; - BatchOutputCollector _collector; - - int _count = 0; - - @Override - public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) { - _collector = collector; - _id = id; - } - - @Override - public void execute(Tuple tuple) { - _count++; - } - - @Override - public void finishBatch() { - _collector.emit(new Values(_id, _count)); - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("id", "count")); - } - } - - public static class UpdateGlobalCount extends BaseTransactionalBolt implements ICommitter { - TransactionAttempt _attempt; - BatchOutputCollector _collector; - - int _sum = 0; - - @Override - public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, TransactionAttempt attempt) { - _collector = collector; - _attempt = attempt; - } - - @Override - public void execute(Tuple tuple) { - _sum += tuple.getInteger(1); - } - - @Override - public void finishBatch() { - Value val = DATABASE.get(GLOBAL_COUNT_KEY); - Value newval; - if (val == null || !val.txid.equals(_attempt.getTransactionId())) { - newval = new Value(); - newval.txid = _attempt.getTransactionId(); - if (val == null) { - newval.count = _sum; - } - else { - newval.count = _sum + val.count; - } - DATABASE.put(GLOBAL_COUNT_KEY, newval); - } - else { - newval = val; - } - _collector.emit(new Values(_attempt, newval.count)); - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("id", "sum")); - } - } - - public static void main(String[] args) throws Exception { - MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH); - TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 3); - builder.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout"); - builder.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count"); - - LocalCluster cluster = new LocalCluster(); - - Config config = new Config(); - config.setDebug(true); - config.setMaxSpoutPending(3); - - cluster.submitTopology("global-count-topology", config, builder.buildTopology()); - - Thread.sleep(3000); - cluster.shutdown(); - } -} diff --git 
a/examples/storm-starter/src/jvm/storm/starter/TransactionalWords.java b/examples/storm-starter/src/jvm/storm/starter/TransactionalWords.java deleted file mode 100644 index 4d5ba1b179e..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/TransactionalWords.java +++ /dev/null @@ -1,246 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter; - -import backtype.storm.Config; -import backtype.storm.LocalCluster; -import backtype.storm.coordination.BatchOutputCollector; -import backtype.storm.task.TopologyContext; -import backtype.storm.testing.MemoryTransactionalSpout; -import backtype.storm.topology.BasicOutputCollector; -import backtype.storm.topology.OutputFieldsDeclarer; -import backtype.storm.topology.base.BaseBasicBolt; -import backtype.storm.topology.base.BaseTransactionalBolt; -import backtype.storm.transactional.ICommitter; -import backtype.storm.transactional.TransactionAttempt; -import backtype.storm.transactional.TransactionalTopologyBuilder; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Tuple; -import backtype.storm.tuple.Values; - -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * This class defines a more involved transactional topology then TransactionalGlobalCount. This topology processes a - * stream of words and produces two outputs: - *
<p/>
- * 1. A count for each word (stored in a database) 2. The number of words for every bucket of 10 counts. So it stores in - * the database how many words have appeared 0-9 times, how many have appeared 10-19 times, and so on. - *
<p/>
- * A batch of words can cause the bucket counts to decrement for some buckets and increment for others as words move - * between buckets as their counts accumulate. - */ -public class TransactionalWords { - public static class CountValue { - Integer prev_count = null; - int count = 0; - BigInteger txid = null; - } - - public static class BucketValue { - int count = 0; - BigInteger txid; - } - - public static final int BUCKET_SIZE = 10; - - public static Map COUNT_DATABASE = new HashMap(); - public static Map BUCKET_DATABASE = new HashMap(); - - - public static final int PARTITION_TAKE_PER_BATCH = 3; - - public static final Map>> DATA = new HashMap>>() {{ - put(0, new ArrayList>() {{ - add(new Values("cat")); - add(new Values("dog")); - add(new Values("chicken")); - add(new Values("cat")); - add(new Values("dog")); - add(new Values("apple")); - }}); - put(1, new ArrayList>() {{ - add(new Values("cat")); - add(new Values("dog")); - add(new Values("apple")); - add(new Values("banana")); - }}); - put(2, new ArrayList>() {{ - add(new Values("cat")); - add(new Values("cat")); - add(new Values("cat")); - add(new Values("cat")); - add(new Values("cat")); - add(new Values("dog")); - add(new Values("dog")); - add(new Values("dog")); - add(new Values("dog")); - }}); - }}; - - public static class KeyedCountUpdater extends BaseTransactionalBolt implements ICommitter { - Map _counts = new HashMap(); - BatchOutputCollector _collector; - TransactionAttempt _id; - - int _count = 0; - - @Override - public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, TransactionAttempt id) { - _collector = collector; - _id = id; - } - - @Override - public void execute(Tuple tuple) { - String key = tuple.getString(1); - Integer curr = _counts.get(key); - if (curr == null) - curr = 0; - _counts.put(key, curr + 1); - } - - @Override - public void finishBatch() { - for (String key : _counts.keySet()) { - CountValue val = COUNT_DATABASE.get(key); - CountValue newVal; - if (val == null || !val.txid.equals(_id)) { - newVal = new CountValue(); - newVal.txid = _id.getTransactionId(); - if (val != null) { - newVal.prev_count = val.count; - newVal.count = val.count; - } - newVal.count = newVal.count + _counts.get(key); - COUNT_DATABASE.put(key, newVal); - } - else { - newVal = val; - } - _collector.emit(new Values(_id, key, newVal.count, newVal.prev_count)); - } - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("id", "key", "count", "prev-count")); - } - } - - public static class Bucketize extends BaseBasicBolt { - @Override - public void execute(Tuple tuple, BasicOutputCollector collector) { - TransactionAttempt attempt = (TransactionAttempt) tuple.getValue(0); - int curr = tuple.getInteger(2); - Integer prev = tuple.getInteger(3); - - int currBucket = curr / BUCKET_SIZE; - Integer prevBucket = null; - if (prev != null) { - prevBucket = prev / BUCKET_SIZE; - } - - if (prevBucket == null) { - collector.emit(new Values(attempt, currBucket, 1)); - } - else if (currBucket != prevBucket) { - collector.emit(new Values(attempt, currBucket, 1)); - collector.emit(new Values(attempt, prevBucket, -1)); - } - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("attempt", "bucket", "delta")); - } - } - - public static class BucketCountUpdater extends BaseTransactionalBolt { - Map _accum = new HashMap(); - BatchOutputCollector _collector; - TransactionAttempt _attempt; - - int _count = 0; 
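// Note (editorial, not part of the patch): the _count field above is unused
// leftover scaffolding; per-bucket state lives in _accum. The transaction-id
// comparison in finishBatch below is what makes the commit idempotent: a
// replayed batch carrying an already-applied txid leaves BUCKET_DATABASE
// untouched.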
- - @Override - public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, TransactionAttempt attempt) { - _collector = collector; - _attempt = attempt; - } - - @Override - public void execute(Tuple tuple) { - Integer bucket = tuple.getInteger(1); - Integer delta = tuple.getInteger(2); - Integer curr = _accum.get(bucket); - if (curr == null) - curr = 0; - _accum.put(bucket, curr + delta); - } - - @Override - public void finishBatch() { - for (Integer bucket : _accum.keySet()) { - BucketValue currVal = BUCKET_DATABASE.get(bucket); - BucketValue newVal; - if (currVal == null || !currVal.txid.equals(_attempt.getTransactionId())) { - newVal = new BucketValue(); - newVal.txid = _attempt.getTransactionId(); - newVal.count = _accum.get(bucket); - if (currVal != null) - newVal.count += currVal.count; - BUCKET_DATABASE.put(bucket, newVal); - } - else { - newVal = currVal; - } - _collector.emit(new Values(_attempt, bucket, newVal.count)); - } - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("id", "bucket", "count")); - } - } - - public static void main(String[] args) throws Exception { - MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH); - TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("top-n-words", "spout", spout, 2); - builder.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping("spout", new Fields("word")); - builder.setBolt("bucketize", new Bucketize()).noneGrouping("count"); - builder.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping("bucketize", new Fields("bucket")); - - - LocalCluster cluster = new LocalCluster(); - - Config config = new Config(); - config.setDebug(true); - config.setMaxSpoutPending(3); - - cluster.submitTopology("top-n-topology", config, builder.buildTopology()); - - Thread.sleep(3000); - cluster.shutdown(); - } -} diff --git a/examples/storm-starter/src/jvm/storm/starter/WordCountTopology.java b/examples/storm-starter/src/jvm/storm/starter/WordCountTopology.java deleted file mode 100644 index 39184daa3e8..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/WordCountTopology.java +++ /dev/null @@ -1,107 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
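A note on WordCountTopology below: SplitSentence demonstrates multilang by shelling out to splitsentence.py via ShellBolt. Where Python is unavailable, a pure-Java splitter works as a stand-in; a sketch using only APIs already present in this patch:

    // Pure-Java stand-in for the multilang SplitSentence bolt.
    public static class SplitSentenceJava extends BaseBasicBolt {
        @Override
        public void execute(Tuple tuple, BasicOutputCollector collector) {
            // Emit one tuple per whitespace-separated word of the sentence.
            for (String word : tuple.getString(0).split("\\s+")) {
                collector.emit(new Values(word));
            }
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("word"));
        }
    }
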
- */ -package storm.starter; - -import backtype.storm.Config; -import backtype.storm.LocalCluster; -import backtype.storm.StormSubmitter; -import backtype.storm.task.ShellBolt; -import backtype.storm.topology.BasicOutputCollector; -import backtype.storm.topology.IRichBolt; -import backtype.storm.topology.OutputFieldsDeclarer; -import backtype.storm.topology.TopologyBuilder; -import backtype.storm.topology.base.BaseBasicBolt; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Tuple; -import backtype.storm.tuple.Values; -import storm.starter.spout.RandomSentenceSpout; - -import java.util.HashMap; -import java.util.Map; - -/** - * This topology demonstrates Storm's stream groupings and multilang capabilities. - */ -public class WordCountTopology { - public static class SplitSentence extends ShellBolt implements IRichBolt { - - public SplitSentence() { - super("python", "splitsentence.py"); - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("word")); - } - - @Override - public Map getComponentConfiguration() { - return null; - } - } - - public static class WordCount extends BaseBasicBolt { - Map counts = new HashMap(); - - @Override - public void execute(Tuple tuple, BasicOutputCollector collector) { - String word = tuple.getString(0); - Integer count = counts.get(word); - if (count == null) - count = 0; - count++; - counts.put(word, count); - collector.emit(new Values(word, count)); - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("word", "count")); - } - } - - public static void main(String[] args) throws Exception { - - TopologyBuilder builder = new TopologyBuilder(); - - builder.setSpout("spout", new RandomSentenceSpout(), 5); - - builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout"); - builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word")); - - Config conf = new Config(); - conf.setDebug(true); - - - if (args != null && args.length > 0) { - conf.setNumWorkers(3); - - StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology()); - } - else { - conf.setMaxTaskParallelism(3); - - LocalCluster cluster = new LocalCluster(); - cluster.submitTopology("word-count", conf, builder.createTopology()); - - Thread.sleep(10000); - - cluster.shutdown(); - } - } -} diff --git a/examples/storm-starter/src/jvm/storm/starter/bolt/AbstractRankerBolt.java b/examples/storm-starter/src/jvm/storm/starter/bolt/AbstractRankerBolt.java deleted file mode 100644 index cc5c0e77233..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/bolt/AbstractRankerBolt.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter.bolt; - -import backtype.storm.Config; -import backtype.storm.topology.BasicOutputCollector; -import backtype.storm.topology.OutputFieldsDeclarer; -import backtype.storm.topology.base.BaseBasicBolt; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Tuple; -import backtype.storm.tuple.Values; -import org.apache.log4j.Logger; -import storm.starter.tools.Rankings; -import storm.starter.util.TupleHelpers; - -import java.util.HashMap; -import java.util.Map; - -/** - * This abstract bolt provides the basic behavior of bolts that rank objects according to their count. - *
<p/>
- * It uses a template method design pattern for {@link AbstractRankerBolt#execute(Tuple, BasicOutputCollector)} to allow - * actual bolt implementations to specify how incoming tuples are processed, i.e. how the objects embedded within those - * tuples are retrieved and counted. - */ -public abstract class AbstractRankerBolt extends BaseBasicBolt { - - private static final long serialVersionUID = 4931640198501530202L; - private static final int DEFAULT_EMIT_FREQUENCY_IN_SECONDS = 2; - private static final int DEFAULT_COUNT = 10; - - private final int emitFrequencyInSeconds; - private final int count; - private final Rankings rankings; - - public AbstractRankerBolt() { - this(DEFAULT_COUNT, DEFAULT_EMIT_FREQUENCY_IN_SECONDS); - } - - public AbstractRankerBolt(int topN) { - this(topN, DEFAULT_EMIT_FREQUENCY_IN_SECONDS); - } - - public AbstractRankerBolt(int topN, int emitFrequencyInSeconds) { - if (topN < 1) { - throw new IllegalArgumentException("topN must be >= 1 (you requested " + topN + ")"); - } - if (emitFrequencyInSeconds < 1) { - throw new IllegalArgumentException( - "The emit frequency must be >= 1 seconds (you requested " + emitFrequencyInSeconds + " seconds)"); - } - count = topN; - this.emitFrequencyInSeconds = emitFrequencyInSeconds; - rankings = new Rankings(count); - } - - protected Rankings getRankings() { - return rankings; - } - - /** - * This method functions as a template method (design pattern). - */ - @Override - public final void execute(Tuple tuple, BasicOutputCollector collector) { - if (TupleHelpers.isTickTuple(tuple)) { - getLogger().debug("Received tick tuple, triggering emit of current rankings"); - emitRankings(collector); - } - else { - updateRankingsWithTuple(tuple); - } - } - - abstract void updateRankingsWithTuple(Tuple tuple); - - private void emitRankings(BasicOutputCollector collector) { - collector.emit(new Values(rankings.copy())); - getLogger().debug("Rankings: " + rankings); - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("rankings")); - } - - @Override - public Map getComponentConfiguration() { - Map conf = new HashMap(); - conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, emitFrequencyInSeconds); - return conf; - } - - abstract Logger getLogger(); -} diff --git a/examples/storm-starter/src/jvm/storm/starter/bolt/IntermediateRankingsBolt.java b/examples/storm-starter/src/jvm/storm/starter/bolt/IntermediateRankingsBolt.java deleted file mode 100644 index d1805ff375e..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/bolt/IntermediateRankingsBolt.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
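A note on AbstractRankerBolt above: its execute() branches on TupleHelpers.isTickTuple(tuple), which is not shown in this hunk. The helper is conventionally written against Storm's system tick stream; a sketch of its assumed shape, using backtype.storm.Constants:

    // Assumed shape of storm.starter.util.TupleHelpers.isTickTuple.
    public static boolean isTickTuple(Tuple tuple) {
        // Tick tuples arrive from the system component on the system tick
        // stream, at the TOPOLOGY_TICK_TUPLE_FREQ_SECS rate the bolt declared.
        return tuple.getSourceComponent().equals(Constants.SYSTEM_COMPONENT_ID)
                && tuple.getSourceStreamId().equals(Constants.SYSTEM_TICK_STREAM_ID);
    }
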
- */ -package storm.starter.bolt; - -import backtype.storm.tuple.Tuple; -import org.apache.log4j.Logger; -import storm.starter.tools.Rankable; -import storm.starter.tools.RankableObjectWithFields; - -/** - * This bolt ranks incoming objects by their count. - *
<p/>
- * It assumes the input tuples to adhere to the following format: (object, object_count, additionalField1, - * additionalField2, ..., additionalFieldN). - */ -public final class IntermediateRankingsBolt extends AbstractRankerBolt { - - private static final long serialVersionUID = -1369800530256637409L; - private static final Logger LOG = Logger.getLogger(IntermediateRankingsBolt.class); - - public IntermediateRankingsBolt() { - super(); - } - - public IntermediateRankingsBolt(int topN) { - super(topN); - } - - public IntermediateRankingsBolt(int topN, int emitFrequencyInSeconds) { - super(topN, emitFrequencyInSeconds); - } - - @Override - void updateRankingsWithTuple(Tuple tuple) { - Rankable rankable = RankableObjectWithFields.from(tuple); - super.getRankings().updateWith(rankable); - } - - @Override - Logger getLogger() { - return LOG; - } -} diff --git a/examples/storm-starter/src/jvm/storm/starter/bolt/PrinterBolt.java b/examples/storm-starter/src/jvm/storm/starter/bolt/PrinterBolt.java deleted file mode 100644 index 58fc8caf1a4..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/bolt/PrinterBolt.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter.bolt; - -import backtype.storm.topology.BasicOutputCollector; -import backtype.storm.topology.OutputFieldsDeclarer; -import backtype.storm.topology.base.BaseBasicBolt; -import backtype.storm.tuple.Tuple; - - -public class PrinterBolt extends BaseBasicBolt { - - @Override - public void execute(Tuple tuple, BasicOutputCollector collector) { - System.out.println(tuple); - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer ofd) { - } - -} diff --git a/examples/storm-starter/src/jvm/storm/starter/bolt/RollingCountBolt.java b/examples/storm-starter/src/jvm/storm/starter/bolt/RollingCountBolt.java deleted file mode 100644 index f83906cea90..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/bolt/RollingCountBolt.java +++ /dev/null @@ -1,142 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter.bolt; - -import backtype.storm.Config; -import backtype.storm.task.OutputCollector; -import backtype.storm.task.TopologyContext; -import backtype.storm.topology.OutputFieldsDeclarer; -import backtype.storm.topology.base.BaseRichBolt; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Tuple; -import backtype.storm.tuple.Values; -import org.apache.log4j.Logger; -import storm.starter.tools.NthLastModifiedTimeTracker; -import storm.starter.tools.SlidingWindowCounter; -import storm.starter.util.TupleHelpers; - -import java.util.HashMap; -import java.util.Map; -import java.util.Map.Entry; - -/** - * This bolt performs rolling counts of incoming objects, i.e. sliding window based counting. - *
<p/>
- * The bolt is configured by two parameters, the length of the sliding window in seconds (which influences the output - * data of the bolt, i.e. how it will count objects) and the emit frequency in seconds (which influences how often the - * bolt will output the latest window counts). For instance, if the window length is set to an equivalent of five - * minutes and the emit frequency to one minute, then the bolt will output the latest five-minute sliding window every - * minute. - *
<p/>
- * The bolt emits a rolling count tuple per object, consisting of the object itself, its latest rolling count, and the - * actual duration of the sliding window. The latter is included in case the expected sliding window length (as - * configured by the user) is different from the actual length, e.g. due to high system load. Note that the actual - * window length is tracked and calculated for the window, and not individually for each object within a window. - *
<p/>
- * Note: During the startup phase you will usually observe that the bolt warns you about the actual sliding window - * length being smaller than the expected length. This behavior is expected and is caused by the way the sliding window - * counts are initially "loaded up". You can safely ignore this warning during startup (e.g. you will see this warning - * during the first ~ five minutes of startup time if the window length is set to five minutes). - */ -public class RollingCountBolt extends BaseRichBolt { - - private static final long serialVersionUID = 5537727428628598519L; - private static final Logger LOG = Logger.getLogger(RollingCountBolt.class); - private static final int NUM_WINDOW_CHUNKS = 5; - private static final int DEFAULT_SLIDING_WINDOW_IN_SECONDS = NUM_WINDOW_CHUNKS * 60; - private static final int DEFAULT_EMIT_FREQUENCY_IN_SECONDS = DEFAULT_SLIDING_WINDOW_IN_SECONDS / NUM_WINDOW_CHUNKS; - private static final String WINDOW_LENGTH_WARNING_TEMPLATE = - "Actual window length is %d seconds when it should be %d seconds" - + " (you can safely ignore this warning during the startup phase)"; - - private final SlidingWindowCounter counter; - private final int windowLengthInSeconds; - private final int emitFrequencyInSeconds; - private OutputCollector collector; - private NthLastModifiedTimeTracker lastModifiedTracker; - - public RollingCountBolt() { - this(DEFAULT_SLIDING_WINDOW_IN_SECONDS, DEFAULT_EMIT_FREQUENCY_IN_SECONDS); - } - - public RollingCountBolt(int windowLengthInSeconds, int emitFrequencyInSeconds) { - this.windowLengthInSeconds = windowLengthInSeconds; - this.emitFrequencyInSeconds = emitFrequencyInSeconds; - counter = new SlidingWindowCounter(deriveNumWindowChunksFrom(this.windowLengthInSeconds, - this.emitFrequencyInSeconds)); - } - - private int deriveNumWindowChunksFrom(int windowLengthInSeconds, int windowUpdateFrequencyInSeconds) { - return windowLengthInSeconds / windowUpdateFrequencyInSeconds; - } - - @SuppressWarnings("rawtypes") - @Override - public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) { - this.collector = collector; - lastModifiedTracker = new NthLastModifiedTimeTracker(deriveNumWindowChunksFrom(this.windowLengthInSeconds, - this.emitFrequencyInSeconds)); - } - - @Override - public void execute(Tuple tuple) { - if (TupleHelpers.isTickTuple(tuple)) { - LOG.debug("Received tick tuple, triggering emit of current window counts"); - emitCurrentWindowCounts(); - } - else { - countObjAndAck(tuple); - } - } - - private void emitCurrentWindowCounts() { - Map counts = counter.getCountsThenAdvanceWindow(); - int actualWindowLengthInSeconds = lastModifiedTracker.secondsSinceOldestModification(); - lastModifiedTracker.markAsModified(); - if (actualWindowLengthInSeconds != windowLengthInSeconds) { - LOG.warn(String.format(WINDOW_LENGTH_WARNING_TEMPLATE, actualWindowLengthInSeconds, windowLengthInSeconds)); - } - emit(counts, actualWindowLengthInSeconds); - } - - private void emit(Map counts, int actualWindowLengthInSeconds) { - for (Entry entry : counts.entrySet()) { - Object obj = entry.getKey(); - Long count = entry.getValue(); - collector.emit(new Values(obj, count, actualWindowLengthInSeconds)); - } - } - - private void countObjAndAck(Tuple tuple) { - Object obj = tuple.getValue(0); - counter.incrementCount(obj); - collector.ack(tuple); - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("obj", "count", "actualWindowLengthInSeconds")); - } - - @Override - public 
Map getComponentConfiguration() { - Map conf = new HashMap(); - conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, emitFrequencyInSeconds); - return conf; - } -} diff --git a/examples/storm-starter/src/jvm/storm/starter/bolt/SingleJoinBolt.java b/examples/storm-starter/src/jvm/storm/starter/bolt/SingleJoinBolt.java deleted file mode 100644 index 85a7a267eb4..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/bolt/SingleJoinBolt.java +++ /dev/null @@ -1,114 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter.bolt; - -import backtype.storm.Config; -import backtype.storm.generated.GlobalStreamId; -import backtype.storm.task.OutputCollector; -import backtype.storm.task.TopologyContext; -import backtype.storm.topology.OutputFieldsDeclarer; -import backtype.storm.topology.base.BaseRichBolt; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Tuple; -import backtype.storm.utils.TimeCacheMap; - -import java.util.*; - -public class SingleJoinBolt extends BaseRichBolt { - OutputCollector _collector; - Fields _idFields; - Fields _outFields; - int _numSources; - TimeCacheMap, Map> _pending; - Map _fieldLocations; - - public SingleJoinBolt(Fields outFields) { - _outFields = outFields; - } - - @Override - public void prepare(Map conf, TopologyContext context, OutputCollector collector) { - _fieldLocations = new HashMap(); - _collector = collector; - int timeout = ((Number) conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS)).intValue(); - _pending = new TimeCacheMap, Map>(timeout, new ExpireCallback()); - _numSources = context.getThisSources().size(); - Set idFields = null; - for (GlobalStreamId source : context.getThisSources().keySet()) { - Fields fields = context.getComponentOutputFields(source.get_componentId(), source.get_streamId()); - Set setFields = new HashSet(fields.toList()); - if (idFields == null) - idFields = setFields; - else - idFields.retainAll(setFields); - - for (String outfield : _outFields) { - for (String sourcefield : fields) { - if (outfield.equals(sourcefield)) { - _fieldLocations.put(outfield, source); - } - } - } - } - _idFields = new Fields(new ArrayList(idFields)); - - if (_fieldLocations.size() != _outFields.size()) { - throw new RuntimeException("Cannot find all outfields among sources"); - } - } - - @Override - public void execute(Tuple tuple) { - List id = tuple.select(_idFields); - GlobalStreamId streamId = new GlobalStreamId(tuple.getSourceComponent(), tuple.getSourceStreamId()); - if (!_pending.containsKey(id)) { - _pending.put(id, new HashMap()); - } - Map parts = _pending.get(id); - if (parts.containsKey(streamId)) - throw new RuntimeException("Received same side of single join twice"); - parts.put(streamId, tuple); - if (parts.size() == _numSources) { - 
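// All sides of the join have arrived for this id: drop the pending entry,
// assemble the output in declared field order, emit it anchored to every
// contributing tuple, and ack each part.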
_pending.remove(id); - List joinResult = new ArrayList(); - for (String outField : _outFields) { - GlobalStreamId loc = _fieldLocations.get(outField); - joinResult.add(parts.get(loc).getValueByField(outField)); - } - _collector.emit(new ArrayList(parts.values()), joinResult); - - for (Tuple part : parts.values()) { - _collector.ack(part); - } - } - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(_outFields); - } - - private class ExpireCallback implements TimeCacheMap.ExpiredCallback, Map> { - @Override - public void expire(List id, Map tuples) { - for (Tuple tuple : tuples.values()) { - _collector.fail(tuple); - } - } - } -} diff --git a/examples/storm-starter/src/jvm/storm/starter/bolt/TotalRankingsBolt.java b/examples/storm-starter/src/jvm/storm/starter/bolt/TotalRankingsBolt.java deleted file mode 100644 index 0e1bb05a770..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/bolt/TotalRankingsBolt.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter.bolt; - -import backtype.storm.tuple.Tuple; -import org.apache.log4j.Logger; -import storm.starter.tools.Rankings; - -/** - * This bolt merges incoming {@link Rankings}. - *
<p/>
- * It can be used to merge intermediate rankings generated by {@link IntermediateRankingsBolt} into a final, - * consolidated ranking. To do so, configure this bolt with a globalGrouping on {@link IntermediateRankingsBolt}. - */ -public final class TotalRankingsBolt extends AbstractRankerBolt { - - private static final long serialVersionUID = -8447525895532302198L; - private static final Logger LOG = Logger.getLogger(TotalRankingsBolt.class); - - public TotalRankingsBolt() { - super(); - } - - public TotalRankingsBolt(int topN) { - super(topN); - } - - public TotalRankingsBolt(int topN, int emitFrequencyInSeconds) { - super(topN, emitFrequencyInSeconds); - } - - @Override - void updateRankingsWithTuple(Tuple tuple) { - Rankings rankingsToBeMerged = (Rankings) tuple.getValue(0); - super.getRankings().updateWith(rankingsToBeMerged); - super.getRankings().pruneZeroCounts(); - } - - @Override - Logger getLogger() { - return LOG; - } - -} diff --git a/examples/storm-starter/src/jvm/storm/starter/spout/RandomSentenceSpout.java b/examples/storm-starter/src/jvm/storm/starter/spout/RandomSentenceSpout.java deleted file mode 100644 index 813b10cf388..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/spout/RandomSentenceSpout.java +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
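A note on RandomSentenceSpout below: it emits without a message id, so its ack() and fail() callbacks never fire and lost tuples are not replayed. A sketch of the reliable variant; the pending-map bookkeeping that fail() would need for replay is left out:

    // Tagging the emit with a message id (java.util.UUID here) turns on the
    // ack()/fail() callbacks for this tuple; a real implementation would
    // remember the sentence under msgId so fail() can re-emit it.
    String msgId = UUID.randomUUID().toString();
    _collector.emit(new Values(sentence), msgId);
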
- */ -package storm.starter.spout; - -import backtype.storm.spout.SpoutOutputCollector; -import backtype.storm.task.TopologyContext; -import backtype.storm.topology.OutputFieldsDeclarer; -import backtype.storm.topology.base.BaseRichSpout; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Values; -import backtype.storm.utils.Utils; - -import java.util.Map; -import java.util.Random; - -public class RandomSentenceSpout extends BaseRichSpout { - SpoutOutputCollector _collector; - Random _rand; - - - @Override - public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) { - _collector = collector; - _rand = new Random(); - } - - @Override - public void nextTuple() { - Utils.sleep(100); - String[] sentences = new String[]{ "the cow jumped over the moon", "an apple a day keeps the doctor away", - "four score and seven years ago", "snow white and the seven dwarfs", "i am at two with nature" }; - String sentence = sentences[_rand.nextInt(sentences.length)]; - _collector.emit(new Values(sentence)); - } - - @Override - public void ack(Object id) { - } - - @Override - public void fail(Object id) { - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("word")); - } - -} \ No newline at end of file diff --git a/examples/storm-starter/src/jvm/storm/starter/spout/TwitterSampleSpout.java b/examples/storm-starter/src/jvm/storm/starter/spout/TwitterSampleSpout.java deleted file mode 100644 index 40f8d72e3ad..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/spout/TwitterSampleSpout.java +++ /dev/null @@ -1,164 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
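A note on TwitterSampleSpout below: open() builds a local twitterStream but never assigns the _twitterStream field, so close() fails with a NullPointerException when it calls _twitterStream.shutdown(). The minimal fix is to keep the stream in the field:

    // In open(): assign the field so close() can shut the stream down.
    _twitterStream = new TwitterStreamFactory(
            new ConfigurationBuilder().setJSONStoreEnabled(true).build()).getInstance();
    _twitterStream.addListener(listener);
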
- */ - -package storm.starter.spout; - -import java.util.Map; -import java.util.concurrent.LinkedBlockingQueue; - -import twitter4j.FilterQuery; -import twitter4j.StallWarning; -import twitter4j.Status; -import twitter4j.StatusDeletionNotice; -import twitter4j.StatusListener; -import twitter4j.TwitterStream; -import twitter4j.TwitterStreamFactory; -import twitter4j.auth.AccessToken; -import twitter4j.conf.ConfigurationBuilder; - -import backtype.storm.Config; -import backtype.storm.spout.SpoutOutputCollector; -import backtype.storm.task.TopologyContext; -import backtype.storm.topology.OutputFieldsDeclarer; -import backtype.storm.topology.base.BaseRichSpout; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Values; -import backtype.storm.utils.Utils; - -@SuppressWarnings("serial") -public class TwitterSampleSpout extends BaseRichSpout { - - SpoutOutputCollector _collector; - LinkedBlockingQueue queue = null; - TwitterStream _twitterStream; - String consumerKey; - String consumerSecret; - String accessToken; - String accessTokenSecret; - String[] keyWords; - - public TwitterSampleSpout(String consumerKey, String consumerSecret, - String accessToken, String accessTokenSecret, String[] keyWords) { - this.consumerKey = consumerKey; - this.consumerSecret = consumerSecret; - this.accessToken = accessToken; - this.accessTokenSecret = accessTokenSecret; - this.keyWords = keyWords; - } - - public TwitterSampleSpout() { - // TODO Auto-generated constructor stub - } - - @Override - public void open(Map conf, TopologyContext context, - SpoutOutputCollector collector) { - queue = new LinkedBlockingQueue(1000); - _collector = collector; - - StatusListener listener = new StatusListener() { - - @Override - public void onStatus(Status status) { - - queue.offer(status); - } - - @Override - public void onDeletionNotice(StatusDeletionNotice sdn) { - } - - @Override - public void onTrackLimitationNotice(int i) { - } - - @Override - public void onScrubGeo(long l, long l1) { - } - - @Override - public void onException(Exception ex) { - } - - @Override - public void onStallWarning(StallWarning arg0) { - // TODO Auto-generated method stub - - } - - }; - - TwitterStream twitterStream = new TwitterStreamFactory( - new ConfigurationBuilder().setJSONStoreEnabled(true).build()) - .getInstance(); - - twitterStream.addListener(listener); - twitterStream.setOAuthConsumer(consumerKey, consumerSecret); - AccessToken token = new AccessToken(accessToken, accessTokenSecret); - twitterStream.setOAuthAccessToken(token); - - if (keyWords.length == 0) { - - twitterStream.sample(); - } - - else { - - FilterQuery query = new FilterQuery().track(keyWords); - twitterStream.filter(query); - } - - } - - @Override - public void nextTuple() { - Status ret = queue.poll(); - if (ret == null) { - Utils.sleep(50); - } else { - _collector.emit(new Values(ret)); - - } - } - - @Override - public void close() { - _twitterStream.shutdown(); - } - - @Override - public Map getComponentConfiguration() { - Config ret = new Config(); - ret.setMaxTaskParallelism(1); - return ret; - } - - @Override - public void ack(Object id) { - } - - @Override - public void fail(Object id) { - } - - @Override - public void declareOutputFields(OutputFieldsDeclarer declarer) { - declarer.declare(new Fields("tweet")); - } - -} diff --git a/examples/storm-starter/src/jvm/storm/starter/tools/NthLastModifiedTimeTracker.java b/examples/storm-starter/src/jvm/storm/starter/tools/NthLastModifiedTimeTracker.java deleted file mode 100644 index 
08df8cfd039..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/tools/NthLastModifiedTimeTracker.java +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter.tools; - -import backtype.storm.utils.Time; -import org.apache.commons.collections.buffer.CircularFifoBuffer; - -/** - * This class tracks the time-since-last-modify of a "thing" in a rolling fashion. - *
- * For example, create a 5-slot tracker to track the five most recent time-since-last-modify values. - *
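- * A usage sketch (illustrative only, not part of the original file; it uses only the API declared below):
- * <pre>
- * {@code
- * NthLastModifiedTimeTracker tracker = new NthLastModifiedTimeTracker(5);
- * tracker.markAsModified();  // record that the tracked "thing" was modified just now
- * // ... later, possibly after more marks ...
- * int age = tracker.secondsSinceOldestModification();  // seconds since the oldest tracked modification
- * }
- * </pre>
- *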
- * You must manually "mark" that the "something" that you want to track -- in terms of modification times -- has just - * been modified. - */ -public class NthLastModifiedTimeTracker { - - private static final int MILLIS_IN_SEC = 1000; - - private final CircularFifoBuffer lastModifiedTimesMillis; - - public NthLastModifiedTimeTracker(int numTimesToTrack) { - if (numTimesToTrack < 1) { - throw new IllegalArgumentException( - "numTimesToTrack must be greater than zero (you requested " + numTimesToTrack + ")"); - } - lastModifiedTimesMillis = new CircularFifoBuffer(numTimesToTrack); - initLastModifiedTimesMillis(); - } - - private void initLastModifiedTimesMillis() { - long nowCached = now(); - for (int i = 0; i < lastModifiedTimesMillis.maxSize(); i++) { - lastModifiedTimesMillis.add(Long.valueOf(nowCached)); - } - } - - private long now() { - return Time.currentTimeMillis(); - } - - public int secondsSinceOldestModification() { - long modifiedTimeMillis = ((Long) lastModifiedTimesMillis.get()).longValue(); - return (int) ((now() - modifiedTimeMillis) / MILLIS_IN_SEC); - } - - public void markAsModified() { - updateLastModifiedTime(); - } - - private void updateLastModifiedTime() { - lastModifiedTimesMillis.add(now()); - } - -} diff --git a/examples/storm-starter/src/jvm/storm/starter/tools/Rankable.java b/examples/storm-starter/src/jvm/storm/starter/tools/Rankable.java deleted file mode 100644 index 85e3d1d0f81..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/tools/Rankable.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter.tools; - -public interface Rankable extends Comparable { - - Object getObject(); - - long getCount(); - - /** - * Note: We do not defensively copy the object wrapped by the Rankable. It is passed as is. - * - * @return a defensive copy - */ - Rankable copy(); -} diff --git a/examples/storm-starter/src/jvm/storm/starter/tools/RankableObjectWithFields.java b/examples/storm-starter/src/jvm/storm/starter/tools/RankableObjectWithFields.java deleted file mode 100644 index 9a0ecae6abf..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/tools/RankableObjectWithFields.java +++ /dev/null @@ -1,148 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter.tools; - -import backtype.storm.tuple.Tuple; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; - -import java.io.Serializable; -import java.util.List; - -/** - * This class wraps an object and its associated count, including any additional data fields. - *
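- * For example (an illustrative sketch, not part of the original file):
- * <pre>
- * {@code
- * // wraps the word "apple", seen 42 times, together with one extra data field
- * Rankable r = new RankableObjectWithFields("apple", 42L, "some-metadata");
- * }
- * </pre>
- *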
- * This class can be used, for instance, to track the number of occurrences of an object in a Storm topology. - */ -public class RankableObjectWithFields implements Rankable, Serializable { - - private static final long serialVersionUID = -9102878650001058090L; - private static final String toStringSeparator = "|"; - - private final Object obj; - private final long count; - private final ImmutableList<Object> fields; - - public RankableObjectWithFields(Object obj, long count, Object... otherFields) { - if (obj == null) { - throw new IllegalArgumentException("The object must not be null"); - } - if (count < 0) { - throw new IllegalArgumentException("The count must be >= 0"); - } - this.obj = obj; - this.count = count; - fields = ImmutableList.copyOf(otherFields); - - } - - /** - * Construct a new instance based on the provided {@link Tuple}. - *
- * This method expects the object to be ranked in the first field (index 0) of the provided tuple, and the number of - * occurrences of the object (its count) in the second field (index 1). Any further fields in the tuple will be - * extracted and tracked, too. These fields can be accessed via {@link RankableObjectWithFields#getFields()}. - * - * @param tuple - * - * @return new instance based on the provided tuple - */ - public static RankableObjectWithFields from(Tuple tuple) { - List otherFields = Lists.newArrayList(tuple.getValues()); - Object obj = otherFields.remove(0); - Long count = (Long) otherFields.remove(0); - return new RankableObjectWithFields(obj, count, otherFields.toArray()); - } - - public Object getObject() { - return obj; - } - - public long getCount() { - return count; - } - - /** - * @return an immutable list of any additional data fields of the object (may be empty but will never be null) - */ - public List getFields() { - return fields; - } - - @Override - public int compareTo(Rankable other) { - long delta = this.getCount() - other.getCount(); - if (delta > 0) { - return 1; - } - else if (delta < 0) { - return -1; - } - else { - return 0; - } - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof RankableObjectWithFields)) { - return false; - } - RankableObjectWithFields other = (RankableObjectWithFields) o; - return obj.equals(other.obj) && count == other.count; - } - - @Override - public int hashCode() { - int result = 17; - int countHash = (int) (count ^ (count >>> 32)); - result = 31 * result + countHash; - result = 31 * result + obj.hashCode(); - return result; - } - - public String toString() { - StringBuffer buf = new StringBuffer(); - buf.append("["); - buf.append(obj); - buf.append(toStringSeparator); - buf.append(count); - for (Object field : fields) { - buf.append(toStringSeparator); - buf.append(field); - } - buf.append("]"); - return buf.toString(); - } - - /** - * Note: We do not defensively copy the wrapped object and any accompanying fields. We do guarantee, however, - * do return a defensive (shallow) copy of the List object that is wrapping any accompanying fields. - * - * @return - */ - @Override - public Rankable copy() { - List shallowCopyOfFields = ImmutableList.copyOf(getFields()); - return new RankableObjectWithFields(getObject(), getCount(), shallowCopyOfFields); - } - -} diff --git a/examples/storm-starter/src/jvm/storm/starter/tools/Rankings.java b/examples/storm-starter/src/jvm/storm/starter/tools/Rankings.java deleted file mode 100644 index 551ebfbe3d9..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/tools/Rankings.java +++ /dev/null @@ -1,156 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package storm.starter.tools; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; - -import java.io.Serializable; -import java.util.Collections; -import java.util.List; - -public class Rankings implements Serializable { - - private static final long serialVersionUID = -1549827195410578903L; - private static final int DEFAULT_COUNT = 10; - - private final int maxSize; - private final List rankedItems = Lists.newArrayList(); - - public Rankings() { - this(DEFAULT_COUNT); - } - - public Rankings(int topN) { - if (topN < 1) { - throw new IllegalArgumentException("topN must be >= 1"); - } - maxSize = topN; - } - - /** - * Copy constructor. - * @param other - */ - public Rankings(Rankings other) { - this(other.maxSize()); - updateWith(other); - } - - /** - * @return the maximum possible number (size) of ranked objects this instance can hold - */ - public int maxSize() { - return maxSize; - } - - /** - * @return the number (size) of ranked objects this instance is currently holding - */ - public int size() { - return rankedItems.size(); - } - - /** - * The returned defensive copy is only "somewhat" defensive. We do, for instance, return a defensive copy of the - * enclosing List instance, and we do try to defensively copy any contained Rankable objects, too. However, the - * contract of {@link storm.starter.tools.Rankable#copy()} does not guarantee that any Object's embedded within - * a Rankable will be defensively copied, too. - * - * @return a somewhat defensive copy of ranked items - */ - public List getRankings() { - List copy = Lists.newLinkedList(); - for (Rankable r: rankedItems) { - copy.add(r.copy()); - } - return ImmutableList.copyOf(copy); - } - - public void updateWith(Rankings other) { - for (Rankable r : other.getRankings()) { - updateWith(r); - } - } - - public void updateWith(Rankable r) { - synchronized(rankedItems) { - addOrReplace(r); - rerank(); - shrinkRankingsIfNeeded(); - } - } - - private void addOrReplace(Rankable r) { - Integer rank = findRankOf(r); - if (rank != null) { - rankedItems.set(rank, r); - } - else { - rankedItems.add(r); - } - } - - private Integer findRankOf(Rankable r) { - Object tag = r.getObject(); - for (int rank = 0; rank < rankedItems.size(); rank++) { - Object cur = rankedItems.get(rank).getObject(); - if (cur.equals(tag)) { - return rank; - } - } - return null; - } - - private void rerank() { - Collections.sort(rankedItems); - Collections.reverse(rankedItems); - } - - private void shrinkRankingsIfNeeded() { - if (rankedItems.size() > maxSize) { - rankedItems.remove(maxSize); - } - } - - /** - * Removes ranking entries that have a count of zero. - */ - public void pruneZeroCounts() { - int i = 0; - while (i < rankedItems.size()) { - if (rankedItems.get(i).getCount() == 0) { - rankedItems.remove(i); - } - else { - i++; - } - } - } - - public String toString() { - return rankedItems.toString(); - } - - /** - * Creates a (defensive) copy of itself. - */ - public Rankings copy() { - return new Rankings(this); - } -} \ No newline at end of file diff --git a/examples/storm-starter/src/jvm/storm/starter/tools/SlidingWindowCounter.java b/examples/storm-starter/src/jvm/storm/starter/tools/SlidingWindowCounter.java deleted file mode 100644 index 1199c401165..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/tools/SlidingWindowCounter.java +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter.tools; - -import java.io.Serializable; -import java.util.Map; - -/** - * This class counts objects in a sliding window fashion. - *
- * It is designed 1) to give multiple "producer" threads write access to the counter, i.e. the ability to increment - * counts of objects, and 2) to give a single "consumer" thread (e.g. {@link PeriodicSlidingWindowCounter}) read access - * to the counter. Whenever the consumer thread performs a read operation, this class will advance the head slot of the - * sliding window counter. This means that the consumer thread indirectly controls where the producer threads' writes - * will go. Also, note that this class never advances the head slot on its own. - *
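- * An illustrative sketch of that interaction (not part of the original file):
- * <pre>
- * {@code
- * SlidingWindowCounter<String> counter = new SlidingWindowCounter<String>(5);
- * counter.incrementCount("storm");  // producer threads write into the current head slot
- * Map<String, Long> counts = counter.getCountsThenAdvanceWindow();  // consumer read; advances the window
- * }
- * </pre>
- *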
- * A note for analyzing data based on a sliding window count: During the initial {@code windowLengthInSlots} - * iterations, this sliding window counter will always return object counts that are equal to or greater than those in - * the previous iteration. This is the effect of the counter "loading up" at the very start of its existence. - * Conceptually, this is the desired behavior. - *
- * To give an example, using a counter with 5 slots which for the sake of this example represent 1 minute of time each: - *
- * <pre>
- * {@code
- * Sliding window counts of an object X over time
- *
- * Minute (timeline):
- * 1    2   3   4   5   6   7   8
- *
- * Observed counts per minute:
- * 1    1   1   1   0   0   0   0
- *
- * Counts returned by counter:
- * 1    2   3   4   4   3   2   1
- * }
- * </pre>
- *
- * As you can see in this example, for the first {@code windowLengthInSlots} iterations (here: the first five minutes) - * the counter will always return counts equal to or greater than those in the previous iteration (1, 2, 3, 4, 4). This - * initial load effect needs to be accounted for whenever you want to perform analyses such as trending topics; - * otherwise your analysis algorithm might falsely identify the object as trending because the counter seems to observe - * continuously increasing counts. Also, note that during the initial load phase every object will exhibit - * increasing counts. - *
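- * One illustrative way to account for the load phase (a sketch, not from the original source; the names
- * {@code iterations}, {@code trendingAlgorithm} and {@code counts} are hypothetical):
- * <pre>
- * {@code
- * if (++iterations > windowLengthInSlots) {
- *   trendingAlgorithm.update(counts);  // from here on the window is fully loaded
- * }
- * }
- * </pre>
- *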
- * At a high level, the counter exhibits the following behavior: If you asked the example counter after two minutes, - * "how often did you count the object during the past five minutes?", then it should reply "I have counted it 2 times - * in the past five minutes", implying that it can only account for the last two of those five minutes because the - * counter was not running before that time. - * - * @param <T> The type of those objects we want to count. - */ -public final class SlidingWindowCounter<T> implements Serializable { - - private static final long serialVersionUID = -2645063988768785810L; - - private SlotBasedCounter<T> objCounter; - private int headSlot; - private int tailSlot; - private int windowLengthInSlots; - - public SlidingWindowCounter(int windowLengthInSlots) { - if (windowLengthInSlots < 2) { - throw new IllegalArgumentException( - "Window length in slots must be at least two (you requested " + windowLengthInSlots + ")"); - } - this.windowLengthInSlots = windowLengthInSlots; - this.objCounter = new SlotBasedCounter<T>(this.windowLengthInSlots); - - this.headSlot = 0; - this.tailSlot = slotAfter(headSlot); - } - - public void incrementCount(T obj) { - objCounter.incrementCount(obj, headSlot); - } - - /** - * Return the current (total) counts of all tracked objects, then advance the window. - *
- * Whenever this method is called, we consider the counts of the current sliding window to be available to and - * successfully processed "upstream" (i.e. by the caller). Knowing this we will start counting any subsequent - * objects within the next "chunk" of the sliding window. - * - * @return The current (total) counts of all tracked objects. - */ - public Map getCountsThenAdvanceWindow() { - Map counts = objCounter.getCounts(); - objCounter.wipeZeros(); - objCounter.wipeSlot(tailSlot); - advanceHead(); - return counts; - } - - private void advanceHead() { - headSlot = tailSlot; - tailSlot = slotAfter(tailSlot); - } - - private int slotAfter(int slot) { - return (slot + 1) % windowLengthInSlots; - } - -} diff --git a/examples/storm-starter/src/jvm/storm/starter/tools/SlotBasedCounter.java b/examples/storm-starter/src/jvm/storm/starter/tools/SlotBasedCounter.java deleted file mode 100644 index 4b2d472dc9a..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/tools/SlotBasedCounter.java +++ /dev/null @@ -1,118 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter.tools; - -import java.io.Serializable; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -/** - * This class provides per-slot counts of the occurrences of objects. - *
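- * For example (an illustrative sketch, not part of the original file):
- * <pre>
- * {@code
- * SlotBasedCounter<String> counter = new SlotBasedCounter<String>(3);
- * counter.incrementCount("storm", 0);  // count once in slot 0
- * counter.incrementCount("storm", 2);  // count once in slot 2
- * long total = counter.getCounts().get("storm");  // 2, i.e. the sum across all slots
- * }
- * </pre>
- *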
- * It can be used, for instance, as a building block for implementing sliding window counting of objects. - * - * @param The type of those objects we want to count. - */ -public final class SlotBasedCounter implements Serializable { - - private static final long serialVersionUID = 4858185737378394432L; - - private final Map objToCounts = new HashMap(); - private final int numSlots; - - public SlotBasedCounter(int numSlots) { - if (numSlots <= 0) { - throw new IllegalArgumentException("Number of slots must be greater than zero (you requested " + numSlots + ")"); - } - this.numSlots = numSlots; - } - - public void incrementCount(T obj, int slot) { - long[] counts = objToCounts.get(obj); - if (counts == null) { - counts = new long[this.numSlots]; - objToCounts.put(obj, counts); - } - counts[slot]++; - } - - public long getCount(T obj, int slot) { - long[] counts = objToCounts.get(obj); - if (counts == null) { - return 0; - } - else { - return counts[slot]; - } - } - - public Map getCounts() { - Map result = new HashMap(); - for (T obj : objToCounts.keySet()) { - result.put(obj, computeTotalCount(obj)); - } - return result; - } - - private long computeTotalCount(T obj) { - long[] curr = objToCounts.get(obj); - long total = 0; - for (long l : curr) { - total += l; - } - return total; - } - - /** - * Reset the slot count of any tracked objects to zero for the given slot. - * - * @param slot - */ - public void wipeSlot(int slot) { - for (T obj : objToCounts.keySet()) { - resetSlotCountToZero(obj, slot); - } - } - - private void resetSlotCountToZero(T obj, int slot) { - long[] counts = objToCounts.get(obj); - counts[slot] = 0; - } - - private boolean shouldBeRemovedFromCounter(T obj) { - return computeTotalCount(obj) == 0; - } - - /** - * Remove any object from the counter whose total count is zero (to free up memory). - */ - public void wipeZeros() { - Set objToBeRemoved = new HashSet(); - for (T obj : objToCounts.keySet()) { - if (shouldBeRemovedFromCounter(obj)) { - objToBeRemoved.add(obj); - } - } - for (T obj : objToBeRemoved) { - objToCounts.remove(obj); - } - } - -} diff --git a/examples/storm-starter/src/jvm/storm/starter/trident/TridentReach.java b/examples/storm-starter/src/jvm/storm/starter/trident/TridentReach.java deleted file mode 100644 index 2d87c474eb6..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/trident/TridentReach.java +++ /dev/null @@ -1,156 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package storm.starter.trident; - -import backtype.storm.Config; -import backtype.storm.LocalCluster; -import backtype.storm.LocalDRPC; -import backtype.storm.generated.StormTopology; -import backtype.storm.task.IMetricsContext; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Values; -import storm.trident.TridentState; -import storm.trident.TridentTopology; -import storm.trident.operation.BaseFunction; -import storm.trident.operation.CombinerAggregator; -import storm.trident.operation.TridentCollector; -import storm.trident.operation.builtin.MapGet; -import storm.trident.operation.builtin.Sum; -import storm.trident.state.ReadOnlyState; -import storm.trident.state.State; -import storm.trident.state.StateFactory; -import storm.trident.state.map.ReadOnlyMapState; -import storm.trident.tuple.TridentTuple; - -import java.util.*; - -public class TridentReach { - public static Map> TWEETERS_DB = new HashMap>() {{ - put("foo.com/blog/1", Arrays.asList("sally", "bob", "tim", "george", "nathan")); - put("engineering.twitter.com/blog/5", Arrays.asList("adam", "david", "sally", "nathan")); - put("tech.backtype.com/blog/123", Arrays.asList("tim", "mike", "john")); - }}; - - public static Map> FOLLOWERS_DB = new HashMap>() {{ - put("sally", Arrays.asList("bob", "tim", "alice", "adam", "jim", "chris", "jai")); - put("bob", Arrays.asList("sally", "nathan", "jim", "mary", "david", "vivian")); - put("tim", Arrays.asList("alex")); - put("nathan", Arrays.asList("sally", "bob", "adam", "harry", "chris", "vivian", "emily", "jordan")); - put("adam", Arrays.asList("david", "carissa")); - put("mike", Arrays.asList("john", "bob")); - put("john", Arrays.asList("alice", "nathan", "jim", "mike", "bob")); - }}; - - public static class StaticSingleKeyMapState extends ReadOnlyState implements ReadOnlyMapState { - public static class Factory implements StateFactory { - Map _map; - - public Factory(Map map) { - _map = map; - } - - @Override - public State makeState(Map conf, IMetricsContext metrics, int partitionIndex, int numPartitions) { - return new StaticSingleKeyMapState(_map); - } - - } - - Map _map; - - public StaticSingleKeyMapState(Map map) { - _map = map; - } - - - @Override - public List multiGet(List> keys) { - List ret = new ArrayList(); - for (List key : keys) { - Object singleKey = key.get(0); - ret.add(_map.get(singleKey)); - } - return ret; - } - - } - - public static class One implements CombinerAggregator { - @Override - public Integer init(TridentTuple tuple) { - return 1; - } - - @Override - public Integer combine(Integer val1, Integer val2) { - return 1; - } - - @Override - public Integer zero() { - return 1; - } - } - - public static class ExpandList extends BaseFunction { - - @Override - public void execute(TridentTuple tuple, TridentCollector collector) { - List l = (List) tuple.getValue(0); - if (l != null) { - for (Object o : l) { - collector.emit(new Values(o)); - } - } - } - - } - - public static StormTopology buildTopology(LocalDRPC drpc) { - TridentTopology topology = new TridentTopology(); - TridentState urlToTweeters = topology.newStaticState(new StaticSingleKeyMapState.Factory(TWEETERS_DB)); - TridentState tweetersToFollowers = topology.newStaticState(new StaticSingleKeyMapState.Factory(FOLLOWERS_DB)); - - - topology.newDRPCStream("reach", drpc).stateQuery(urlToTweeters, new Fields("args"), new MapGet(), new Fields( - "tweeters")).each(new Fields("tweeters"), new ExpandList(), new Fields("tweeter")).shuffle().stateQuery( - tweetersToFollowers, new Fields("tweeter"), 
new MapGet(), new Fields("followers")).each(new Fields("followers"), - new ExpandList(), new Fields("follower")).groupBy(new Fields("follower")).aggregate(new One(), new Fields( - "one")).aggregate(new Fields("one"), new Sum(), new Fields("reach")); - return topology.build(); - } - - public static void main(String[] args) throws Exception { - LocalDRPC drpc = new LocalDRPC(); - - Config conf = new Config(); - LocalCluster cluster = new LocalCluster(); - - cluster.submitTopology("reach", conf, buildTopology(drpc)); - - Thread.sleep(2000); - - System.out.println("REACH: " + drpc.execute("reach", "aaa")); - System.out.println("REACH: " + drpc.execute("reach", "foo.com/blog/1")); - System.out.println("REACH: " + drpc.execute("reach", "engineering.twitter.com/blog/5")); - - - cluster.shutdown(); - drpc.shutdown(); - } -} diff --git a/examples/storm-starter/src/jvm/storm/starter/trident/TridentWordCount.java b/examples/storm-starter/src/jvm/storm/starter/trident/TridentWordCount.java deleted file mode 100644 index e4a2d2e92f5..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/trident/TridentWordCount.java +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package storm.starter.trident; - -import backtype.storm.Config; -import backtype.storm.LocalCluster; -import backtype.storm.LocalDRPC; -import backtype.storm.StormSubmitter; -import backtype.storm.generated.StormTopology; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Values; -import storm.trident.TridentState; -import storm.trident.TridentTopology; -import storm.trident.operation.BaseFunction; -import storm.trident.operation.TridentCollector; -import storm.trident.operation.builtin.Count; -import storm.trident.operation.builtin.FilterNull; -import storm.trident.operation.builtin.MapGet; -import storm.trident.operation.builtin.Sum; -import storm.trident.testing.FixedBatchSpout; -import storm.trident.testing.MemoryMapState; -import storm.trident.tuple.TridentTuple; - - -public class TridentWordCount { - public static class Split extends BaseFunction { - @Override - public void execute(TridentTuple tuple, TridentCollector collector) { - String sentence = tuple.getString(0); - for (String word : sentence.split(" ")) { - collector.emit(new Values(word)); - } - } - } - - public static StormTopology buildTopology(LocalDRPC drpc) { - FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3, new Values("the cow jumped over the moon"), - new Values("the man went to the store and bought some candy"), new Values("four score and seven years ago"), - new Values("how many apples can you eat"), new Values("to be or not to be the person")); - spout.setCycle(true); - - TridentTopology topology = new TridentTopology(); - TridentState wordCounts = topology.newStream("spout1", spout).parallelismHint(16).each(new Fields("sentence"), - new Split(), new Fields("word")).groupBy(new Fields("word")).persistentAggregate(new MemoryMapState.Factory(), - new Count(), new Fields("count")).parallelismHint(16); - - topology.newDRPCStream("words", drpc).each(new Fields("args"), new Split(), new Fields("word")).groupBy(new Fields( - "word")).stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count")).each(new Fields("count"), - new FilterNull()).aggregate(new Fields("count"), new Sum(), new Fields("sum")); - return topology.build(); - } - - public static void main(String[] args) throws Exception { - Config conf = new Config(); - conf.setMaxSpoutPending(20); - if (args.length == 0) { - LocalDRPC drpc = new LocalDRPC(); - LocalCluster cluster = new LocalCluster(); - cluster.submitTopology("wordCounter", conf, buildTopology(drpc)); - for (int i = 0; i < 100; i++) { - System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped")); - Thread.sleep(1000); - } - } - else { - conf.setNumWorkers(3); - StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null)); - } - } -} diff --git a/examples/storm-starter/src/jvm/storm/starter/util/StormRunner.java b/examples/storm-starter/src/jvm/storm/starter/util/StormRunner.java deleted file mode 100644 index f916ec61b8f..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/util/StormRunner.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter.util; - -import backtype.storm.Config; -import backtype.storm.LocalCluster; -import backtype.storm.generated.StormTopology; - -public final class StormRunner { - - private static final int MILLIS_IN_SEC = 1000; - - private StormRunner() { - } - - public static void runTopologyLocally(StormTopology topology, String topologyName, Config conf, int runtimeInSeconds) - throws InterruptedException { - LocalCluster cluster = new LocalCluster(); - cluster.submitTopology(topologyName, conf, topology); - Thread.sleep((long) runtimeInSeconds * MILLIS_IN_SEC); - cluster.killTopology(topologyName); - cluster.shutdown(); - } -} diff --git a/examples/storm-starter/src/jvm/storm/starter/util/TupleHelpers.java b/examples/storm-starter/src/jvm/storm/starter/util/TupleHelpers.java deleted file mode 100644 index 4ea669ed2e1..00000000000 --- a/examples/storm-starter/src/jvm/storm/starter/util/TupleHelpers.java +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter.util; - -import backtype.storm.Constants; -import backtype.storm.tuple.Tuple; - -public final class TupleHelpers { - - private TupleHelpers() { - } - - public static boolean isTickTuple(Tuple tuple) { - return tuple.getSourceComponent().equals(Constants.SYSTEM_COMPONENT_ID) && tuple.getSourceStreamId().equals( - Constants.SYSTEM_TICK_STREAM_ID); - } - -} diff --git a/examples/storm-starter/test/clj/org/apache/storm/starter/clj/bolts_test.clj b/examples/storm-starter/test/clj/org/apache/storm/starter/clj/bolts_test.clj new file mode 100644 index 00000000000..745678cac23 --- /dev/null +++ b/examples/storm-starter/test/clj/org/apache/storm/starter/clj/bolts_test.clj @@ -0,0 +1,106 @@ +;; Licensed to the Apache Software Foundation (ASF) under one +;; or more contributor license agreements. See the NOTICE file +;; distributed with this work for additional information +;; regarding copyright ownership. The ASF licenses this file +;; to you under the Apache License, Version 2.0 (the +;; "License"); you may not use this file except in compliance +;; with the License. 
You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +(ns org.apache.storm.starter.clj.bolts-test + (:require [clojure.test :refer :all] + [org.apache.storm.starter.clj.word-count :refer [word-count split-sentence]] + [org.apache.storm.starter.clj.exclamation :refer [exclamation-bolt]] + [org.apache.storm.starter.clj.bolts :refer + [rolling-count-bolt intermediate-rankings-bolt total-rankings-bolt]] + [org.apache.storm [testing :refer :all]]) + (:import [org.apache.storm Constants Testing] + [org.apache.storm.testing MkTupleParam] + [org.apache.storm.task OutputCollector IOutputCollector] + [org.apache.storm.starter.tools Rankable] + [org.apache.storm.tuple Tuple] + [java.util ArrayList])) + +(defn execute-tuples [bolt tuples] + (let [out (atom [])] + (.prepare bolt {} nil (OutputCollector. + (reify IOutputCollector + (emit [_ _ _ tuple] + (swap! out conj tuple)) + (ack [_ input])))) + (if (vector? tuples) + (doseq [t tuples] + (.execute bolt t)) + (.execute bolt tuples)) + @out)) + +(defn- mock-tuple [m & {component :component stream-id :stream-id + :or {component "1" stream-id "1"}}] + (let [param (MkTupleParam.)] + (.setStream param stream-id) + (.setComponent param component) + (.setFieldsList param (ArrayList. (.keySet m))) + (Testing/testTuple (ArrayList. (.values m)) param))) + +(def ^{:private true} tick-tuple + (mock-tuple {} + :component Constants/SYSTEM_COMPONENT_ID + :stream-id Constants/SYSTEM_TICK_STREAM_ID)) + +(deftest test-split-sentence + (testing "Bolt emits word per sentence" + (let [tuples (execute-tuples + split-sentence + (mock-tuple {"sentence" "the cat jumped over the door"}))] + (is (= [["the"] ["cat"] ["jumped"] ["over"] ["the"] ["door"]] tuples))))) + +(deftest test-word-count + (testing "Bolt emits new count" + (let [tuples (execute-tuples word-count [(mock-tuple {"word" "the"}) + (mock-tuple {"word" "the"}) + (mock-tuple {"word" "cat"})])] + (is (ms= [["the" 1] ["the" 2] ["cat" 1]] tuples))))) + +(deftest test-exclamation-bolt + (testing "Bolt emits word with exclamation marks" + (let [tuples (execute-tuples exclamation-bolt (mock-tuple {"word" "nathan"}))] + (is (= [["nathan!!!"]] tuples))))) + +(deftest test-rolling-bolt + (testing "Emits nothing if no object has been counted" + (let [tuples (execute-tuples (rolling-count-bolt 9 3) tick-tuple)] + (is (empty? tuples)))) + (testing "Emits something if object was counted" + (let [tuples (execute-tuples (rolling-count-bolt 9 3) + [(mock-tuple {"word" "nathan"}) tick-tuple])] + (is (= [["nathan" 1 0]] tuples))))) + +(deftest test-intermediate-rankings-bolt + (testing "Emits rankings for tick tuple" + (let [tuples (execute-tuples (intermediate-rankings-bolt 5 2) tick-tuple)] + (is (seq tuples)))) + (testing "Emits nothing for normal tuple" + (let [tuples (execute-tuples (intermediate-rankings-bolt 5 2) + (mock-tuple {"obj" "nathan" "count" 1}))] + (is (empty? 
tuples))))) + +(defn- mock-rankable [object count] + "Creates rankable with object and count" + (reify Rankable + (getCount [_] count) + (getObject [_] object))) + +(deftest test-total-rankings-bolt + (testing "Emits rankings for tick tuple" + (let [tuples (execute-tuples (total-rankings-bolt 5 2) tick-tuple)] + (is (seq tuples)))) + (testing "Emits nothing for normal tuple" + (let [tuples (execute-tuples (total-rankings-bolt 5 2) + (mock-tuple {"rankings" (mock-rankable "nathan" 2)}))] + (is (empty? tuples))))) diff --git a/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/IntermediateRankingsBoltTest.java b/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/IntermediateRankingsBoltTest.java new file mode 100644 index 00000000000..18834d3095c --- /dev/null +++ b/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/IntermediateRankingsBoltTest.java @@ -0,0 +1,144 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.bolt; + +import com.google.common.collect.Lists; +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.topology.BasicOutputCollector; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.MockTupleHelpers; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import static org.fest.assertions.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; + +public class IntermediateRankingsBoltTest { + + private static final String ANY_NON_SYSTEM_COMPONENT_ID = "irrelevant_component_id"; + private static final String ANY_NON_SYSTEM_STREAM_ID = "irrelevant_stream_id"; + private static final Object ANY_OBJECT = new Object(); + private static final int ANY_TOPN = 10; + private static final long ANY_COUNT = 42; + + private Tuple mockRankableTuple(Object obj, long count) { + Tuple tuple = MockTupleHelpers.mockTuple(ANY_NON_SYSTEM_COMPONENT_ID, ANY_NON_SYSTEM_STREAM_ID); + when(tuple.getValues()).thenReturn(Lists.newArrayList(ANY_OBJECT, ANY_COUNT)); + return tuple; + } + + @DataProvider + public Object[][] illegalTopN() { + return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } }; + } + + @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalTopN") + public void negativeOrZeroTopNShouldThrowIAE(int topN) { + new IntermediateRankingsBolt(topN); + } + + @DataProvider + public Object[][] illegalEmitFrequency() { + return 
new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } }; + } + + @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalEmitFrequency") + public void negativeOrZeroEmitFrequencyShouldThrowIAE(int emitFrequencyInSeconds) { + new IntermediateRankingsBolt(ANY_TOPN, emitFrequencyInSeconds); + } + + @DataProvider + public Object[][] legalTopN() { + return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } }; + } + + @Test(dataProvider = "legalTopN") + public void positiveTopNShouldBeOk(int topN) { + new IntermediateRankingsBolt(topN); + } + + @DataProvider + public Object[][] legalEmitFrequency() { + return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } }; + } + + @Test(dataProvider = "legalEmitFrequency") + public void positiveEmitFrequencyShouldBeOk(int emitFrequencyInSeconds) { + new IntermediateRankingsBolt(ANY_TOPN, emitFrequencyInSeconds); + } + + @Test + public void shouldEmitSomethingIfTickTupleIsReceived() { + // given + Tuple tickTuple = MockTupleHelpers.mockTickTuple(); + BasicOutputCollector collector = mock(BasicOutputCollector.class); + IntermediateRankingsBolt bolt = new IntermediateRankingsBolt(); + + // when + bolt.execute(tickTuple, collector); + + // then + // verifyNoInteractions(collector); + verify(collector).emit(any(Values.class)); + } + + @Test + public void shouldEmitNothingIfNormalTupleIsReceived() { + // given + Tuple normalTuple = mockRankableTuple(ANY_OBJECT, ANY_COUNT); + BasicOutputCollector collector = mock(BasicOutputCollector.class); + IntermediateRankingsBolt bolt = new IntermediateRankingsBolt(); + + // when + bolt.execute(normalTuple, collector); + + // then + verifyNoInteractions(collector); + } + + @Test + public void shouldDeclareOutputFields() { + // given + OutputFieldsDeclarer declarer = mock(OutputFieldsDeclarer.class); + IntermediateRankingsBolt bolt = new IntermediateRankingsBolt(); + + // when + bolt.declareOutputFields(declarer); + + // then + verify(declarer, times(1)).declare(any(Fields.class)); + } + + @Test + public void shouldSetTickTupleFrequencyInComponentConfigurationToNonZeroValue() { + // given + IntermediateRankingsBolt bolt = new IntermediateRankingsBolt(); + + // when + Map componentConfig = bolt.getComponentConfiguration(); + + // then + assertThat(componentConfig).containsKey(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS); + Integer emitFrequencyInSeconds = (Integer) componentConfig.get(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS); + assertThat(emitFrequencyInSeconds).isGreaterThan(0); + } +} diff --git a/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/RollingCountBoltTest.java b/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/RollingCountBoltTest.java new file mode 100644 index 00000000000..56d65606520 --- /dev/null +++ b/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/RollingCountBoltTest.java @@ -0,0 +1,111 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.bolt; + +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.MockTupleHelpers; +import org.testng.annotations.Test; + +import static org.fest.assertions.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; + +public class RollingCountBoltTest { + + private static final String ANY_NON_SYSTEM_COMPONENT_ID = "irrelevant_component_id"; + private static final String ANY_NON_SYSTEM_STREAM_ID = "irrelevant_stream_id"; + + private Tuple mockNormalTuple(Object obj) { + Tuple tuple = MockTupleHelpers.mockTuple(ANY_NON_SYSTEM_COMPONENT_ID, ANY_NON_SYSTEM_STREAM_ID); + when(tuple.getValue(0)).thenReturn(obj); + return tuple; + } + + @SuppressWarnings("rawtypes") + @Test + public void shouldEmitNothingIfNoObjectHasBeenCountedYetAndTickTupleIsReceived() { + // given + Tuple tickTuple = MockTupleHelpers.mockTickTuple(); + RollingCountBolt bolt = new RollingCountBolt(); + Map conf = mock(Map.class); + TopologyContext context = mock(TopologyContext.class); + OutputCollector collector = mock(OutputCollector.class); + bolt.prepare(conf, context, collector); + + // when + bolt.execute(tickTuple); + + // then + verifyNoInteractions(collector); + } + + @SuppressWarnings("rawtypes") + @Test + public void shouldEmitSomethingIfAtLeastOneObjectWasCountedAndTickTupleIsReceived() { + // given + Tuple normalTuple = mockNormalTuple(new Object()); + Tuple tickTuple = MockTupleHelpers.mockTickTuple(); + + RollingCountBolt bolt = new RollingCountBolt(); + Map conf = mock(Map.class); + TopologyContext context = mock(TopologyContext.class); + OutputCollector collector = mock(OutputCollector.class); + bolt.prepare(conf, context, collector); + + // when + bolt.execute(normalTuple); + bolt.execute(tickTuple); + + // then + verify(collector).emit(any(Values.class)); + } + + @Test + public void shouldDeclareOutputFields() { + // given + OutputFieldsDeclarer declarer = mock(OutputFieldsDeclarer.class); + RollingCountBolt bolt = new RollingCountBolt(); + + // when + bolt.declareOutputFields(declarer); + + // then + verify(declarer, times(1)).declare(any(Fields.class)); + + } + + @Test + public void shouldSetTickTupleFrequencyInComponentConfigurationToNonZeroValue() { + // given + RollingCountBolt bolt = new RollingCountBolt(); + + // when + Map componentConfig = bolt.getComponentConfiguration(); + + // then + assertThat(componentConfig).containsKey(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS); + Integer emitFrequencyInSeconds = (Integer) componentConfig.get(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS); + assertThat(emitFrequencyInSeconds).isGreaterThan(0); + } +} diff --git a/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/TotalRankingsBoltTest.java b/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/TotalRankingsBoltTest.java new file mode 100644 index 00000000000..d74e41e5a19 --- /dev/null +++ 
b/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/TotalRankingsBoltTest.java @@ -0,0 +1,145 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.bolt; + +import java.util.Map; +import org.apache.storm.Config; +import org.apache.storm.starter.tools.Rankings; +import org.apache.storm.topology.BasicOutputCollector; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.MockTupleHelpers; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import static org.fest.assertions.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; + +public class TotalRankingsBoltTest { + + private static final String ANY_NON_SYSTEM_COMPONENT_ID = "irrelevant_component_id"; + private static final String ANY_NON_SYSTEM_STREAM_ID = "irrelevant_stream_id"; + private static final Object ANY_OBJECT = new Object(); + private static final int ANY_TOPN = 10; + private static final long ANY_COUNT = 42; + + private Tuple mockRankingsTuple(Object obj, long count) { + Tuple tuple = MockTupleHelpers.mockTuple(ANY_NON_SYSTEM_COMPONENT_ID, ANY_NON_SYSTEM_STREAM_ID); + Rankings rankings = mock(Rankings.class); + when(tuple.getValue(0)).thenReturn(rankings); + return tuple; + } + + @DataProvider + public Object[][] illegalTopN() { + return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } }; + } + + @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalTopN") + public void negativeOrZeroTopNShouldThrowIAE(int topN) { + new TotalRankingsBolt(topN); + } + + @DataProvider + public Object[][] illegalEmitFrequency() { + return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } }; + } + + @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalEmitFrequency") + public void negativeOrZeroEmitFrequencyShouldThrowIAE(int emitFrequencyInSeconds) { + new TotalRankingsBolt(ANY_TOPN, emitFrequencyInSeconds); + } + + @DataProvider + public Object[][] legalTopN() { + return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } }; + } + + @Test(dataProvider = "legalTopN") + public void positiveTopNShouldBeOk(int topN) { + new TotalRankingsBolt(topN); + } + + @DataProvider + public Object[][] legalEmitFrequency() { + return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } }; + } + + @Test(dataProvider = "legalEmitFrequency") + public void positiveEmitFrequencyShouldBeOk(int emitFrequencyInSeconds) { + new 
TotalRankingsBolt(ANY_TOPN, emitFrequencyInSeconds); + } + + @Test + public void shouldEmitSomethingIfTickTupleIsReceived() { + // given + Tuple tickTuple = MockTupleHelpers.mockTickTuple(); + BasicOutputCollector collector = mock(BasicOutputCollector.class); + TotalRankingsBolt bolt = new TotalRankingsBolt(); + + // when + bolt.execute(tickTuple, collector); + + // then + // verifyNoInteractions(collector); + verify(collector).emit(any(Values.class)); + } + + @Test + public void shouldEmitNothingIfNormalTupleIsReceived() { + // given + Tuple normalTuple = mockRankingsTuple(ANY_OBJECT, ANY_COUNT); + BasicOutputCollector collector = mock(BasicOutputCollector.class); + TotalRankingsBolt bolt = new TotalRankingsBolt(); + + // when + bolt.execute(normalTuple, collector); + + // then + verifyNoInteractions(collector); + } + + @Test + public void shouldDeclareOutputFields() { + // given + OutputFieldsDeclarer declarer = mock(OutputFieldsDeclarer.class); + TotalRankingsBolt bolt = new TotalRankingsBolt(); + + // when + bolt.declareOutputFields(declarer); + + // then + verify(declarer, times(1)).declare(any(Fields.class)); + } + + @Test + public void shouldSetTickTupleFrequencyInComponentConfigurationToNonZeroValue() { + // given + TotalRankingsBolt bolt = new TotalRankingsBolt(); + + // when + Map componentConfig = bolt.getComponentConfiguration(); + + // then + assertThat(componentConfig).containsKey(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS); + Integer emitFrequencyInSeconds = (Integer) componentConfig.get(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS); + assertThat(emitFrequencyInSeconds).isGreaterThan(0); + } +} diff --git a/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/NthLastModifiedTimeTrackerTest.java b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/NthLastModifiedTimeTrackerTest.java new file mode 100644 index 00000000000..f4393ed41b4 --- /dev/null +++ b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/NthLastModifiedTimeTrackerTest.java @@ -0,0 +1,115 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +package org.apache.storm.starter.tools; + +import org.apache.storm.utils.Time; +import org.apache.storm.utils.Time.SimulatedTime; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import static org.fest.assertions.api.Assertions.assertThat; + +public class NthLastModifiedTimeTrackerTest { + + private static final int ANY_NUM_TIMES_TO_TRACK = 3; + private static final int MILLIS_IN_SEC = 1000; + + @DataProvider + public Object[][] illegalNumTimesData() { + return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } }; + } + + @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalNumTimesData") + public void negativeOrZeroNumTimesToTrackShouldThrowIAE(int numTimesToTrack) { + new NthLastModifiedTimeTracker(numTimesToTrack); + } + + @DataProvider + public Object[][] legalNumTimesData() { + return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } }; + } + + @Test(dataProvider = "legalNumTimesData") + public void positiveNumTimesToTrackShouldBeOk(int numTimesToTrack) { + new NthLastModifiedTimeTracker(numTimesToTrack); + } + + @DataProvider + public Object[][] whenNotYetMarkedAsModifiedData() { + return new Object[][]{ { 0 }, { 1 }, { 2 }, { 3 }, { 4 }, { 5 }, { 8 }, { 10 } }; + } + + @Test(dataProvider = "whenNotYetMarkedAsModifiedData") + public void shouldReturnCorrectModifiedTimeEvenWhenNotYetMarkedAsModified(int secondsToAdvance) { + // given + try (SimulatedTime t = new SimulatedTime()) { + NthLastModifiedTimeTracker tracker = new NthLastModifiedTimeTracker(ANY_NUM_TIMES_TO_TRACK); + + // when + Time.advanceTimeSecs(secondsToAdvance); + int seconds = tracker.secondsSinceOldestModification(); + + // then + assertThat(seconds).isEqualTo(secondsToAdvance); + } + } + + @DataProvider + public Object[][] simulatedTrackerIterations() { + return new Object[][]{ + { 1, new int[]{ 0, 1 }, new int[]{ 0, 0 } }, { 1, new int[]{ 0, 2 }, new int[]{ 0, 0 } }, + { 2, new int[]{ 2, 2 }, new int[]{ 2, 2 } }, { 2, new int[]{ 0, 4 }, new int[]{ 0, 4 } }, + { 1, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 0, 0, 0, 0, 0, 0, 0 } }, + { 1, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 0, 0, 0, 0, 0, 0, 0 } }, + { 2, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 1, 1, 1, 1, 1, 1 } }, + { 2, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 2, 2, 2, 2, 2, 2 } }, + { 2, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 2, 3, 4, 5, 6, 7 } }, + { 3, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 2, 2, 2, 2, 2, 2 } }, + { 3, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 3, 5, 7, 9, 11, 13 } }, + { 3, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 4, 4, 4, 4, 4, 4 } }, + { 4, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 2, 3, 3, 3, 3, 3 } }, + { 4, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 3, 6, 9, 12, 15, 18 } }, + { 4, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 4, 6, 6, 6, 6, 6 } }, + { 5, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 2, 3, 4, 4, 4, 4 } }, + { 5, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 3, 6, 10, 14, 18, 22 } }, + { 5, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 4, 6, 8, 8, 8, 8 } }, + { 6, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 2, 3, 4, 5, 5, 5 } }, + { 6, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 3, 6, 10, 15, 20, 25 } }, + { 6, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 4, 6, 8, 10, 10, 10 } }, + { 3, new int[]{ 1, 2, 3 }, new int[]{ 1, 3, 5 } } + }; + } + + @Test(dataProvider = "simulatedTrackerIterations") + public void shouldReturnCorrectModifiedTimeWhenMarkedAsModified(int 
numTimesToTrack, + int[] secondsToAdvancePerIteration, int[] expLastModifiedTimes) { + // given + try (SimulatedTime t = new SimulatedTime()) { + NthLastModifiedTimeTracker tracker = new NthLastModifiedTimeTracker(numTimesToTrack); + + int[] modifiedTimes = new int[expLastModifiedTimes.length]; + + // when + int i = 0; + for (int secondsToAdvance : secondsToAdvancePerIteration) { + Time.advanceTimeSecs(secondsToAdvance); + tracker.markAsModified(); + modifiedTimes[i] = tracker.secondsSinceOldestModification(); + i++; + } + + // then + assertThat(modifiedTimes).isEqualTo(expLastModifiedTimes); + } + } +} diff --git a/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/RankableObjectWithFieldsTest.java b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/RankableObjectWithFieldsTest.java new file mode 100644 index 00000000000..cf275d26d72 --- /dev/null +++ b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/RankableObjectWithFieldsTest.java @@ -0,0 +1,274 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +package org.apache.storm.starter.tools; + +import com.google.common.collect.Lists; +import java.util.ArrayList; +import java.util.List; +import org.apache.storm.tuple.Tuple; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import static org.fest.assertions.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; + +public class RankableObjectWithFieldsTest { + + private static final Object ANY_OBJECT = new Object(); + private static final long ANY_COUNT = 271; + private static final String ANY_FIELD = "someAdditionalField"; + private static final int GREATER_THAN = 1; + private static final int EQUAL_TO = 0; + private static final int SMALLER_THAN = -1; + + @Test(expectedExceptions = IllegalArgumentException.class) + public void constructorWithNullObjectAndNoFieldsShouldThrowIAE() { + new RankableObjectWithFields(null, ANY_COUNT); + } + + @Test(expectedExceptions = IllegalArgumentException.class) + public void constructorWithNullObjectAndFieldsShouldThrowIAE() { + Object someAdditionalField = new Object(); + new RankableObjectWithFields(null, ANY_COUNT, someAdditionalField); + } + + @Test(expectedExceptions = IllegalArgumentException.class) + public void constructorWithNegativeCountAndNoFieldsShouldThrowIAE() { + new RankableObjectWithFields(ANY_OBJECT, -1); + } + + @Test(expectedExceptions = IllegalArgumentException.class) + public void constructorWithNegativeCountAndFieldsShouldThrowIAE() { + Object someAdditionalField = new Object(); + new RankableObjectWithFields(ANY_OBJECT, -1, someAdditionalField); + } + + @Test + public void shouldBeEqualToItself() { + RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT); + assertThat(r).isEqualTo(r); + } + + @DataProvider + public Object[][] otherClassesData() { + return new Object[][]{ + {"foo"}, { new Object() }, {4}, { + Lists.newArrayList(7, 8, 9) + } + }; + } + + @Test(dataProvider = "otherClassesData") + public void shouldNotBeEqualToInstancesOfOtherClasses(Object notARankable) { + RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT); + assertFalse(r.equals(notARankable), r + " is equal to " + notARankable + " but it should not be"); + } + + @DataProvider + public Object[][] falseDuplicatesData() { + return new Object[][]{ + { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 1) }, + { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("Foo", 1) }, + { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("FOO", 1) }, + { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("bar", 1) }, + { new RankableObjectWithFields("", 0), new RankableObjectWithFields("", 1) }, { + new RankableObjectWithFields("", + 1), new RankableObjectWithFields("bar", 1) + } + }; + } + + @Test(dataProvider = "falseDuplicatesData") + public void shouldNotBeEqualToFalseDuplicates(RankableObjectWithFields r, RankableObjectWithFields falseDuplicate) { + assertFalse(r.equals(falseDuplicate), r + " is equal to " + falseDuplicate + " but it should not be"); + } + + @Test(dataProvider = "falseDuplicatesData") + public void shouldHaveDifferentHashCodeThanFalseDuplicates(RankableObjectWithFields r, + RankableObjectWithFields falseDuplicate) { + assertThat(r.hashCode()).isNotEqualTo(falseDuplicate.hashCode()); + } + + @DataProvider + public Object[][] 
trueDuplicatesData() { + return new Object[][]{ + { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 0) }, + { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 0, "someOtherField") }, + { + new RankableObjectWithFields("foo", 0, "someField"), new RankableObjectWithFields("foo", 0, + "someOtherField") + } + }; + } + + @Test(dataProvider = "trueDuplicatesData") + public void shouldBeEqualToTrueDuplicates(RankableObjectWithFields r, RankableObjectWithFields trueDuplicate) { + assertTrue(r.equals(trueDuplicate), r + " is not equal to " + trueDuplicate + " but it should be"); + } + + @Test(dataProvider = "trueDuplicatesData") + public void shouldHaveSameHashCodeAsTrueDuplicates(RankableObjectWithFields r, + RankableObjectWithFields trueDuplicate) { + assertThat(r.hashCode()).isEqualTo(trueDuplicate.hashCode()); + } + + @DataProvider + public Object[][] compareToData() { + return new Object[][]{ + { + new RankableObjectWithFields("foo", 1000), new RankableObjectWithFields("foo", 0), + GREATER_THAN + }, { + new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("foo", 0), + GREATER_THAN + }, { + new RankableObjectWithFields("foo", 1000), new RankableObjectWithFields("bar", 0), + GREATER_THAN + }, { + new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("bar", 0), + GREATER_THAN + }, { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 0), EQUAL_TO }, + { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("bar", 0), EQUAL_TO }, + { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 1000), SMALLER_THAN }, + { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 1), SMALLER_THAN }, + { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("bar", 1), SMALLER_THAN }, + { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("bar", 1000), SMALLER_THAN }, + }; + } + + @Test(dataProvider = "compareToData") + public void verifyCompareTo(RankableObjectWithFields first, RankableObjectWithFields second, int expCompareToValue) { + assertThat(first.compareTo(second)).isEqualTo(expCompareToValue); + } + + @DataProvider + public Object[][] toStringData() { + return new Object[][]{ {"foo", 0L }, {"BAR", 8L } }; + } + + @Test(dataProvider = "toStringData") + public void toStringShouldContainStringRepresentationsOfObjectAndCount(Object obj, long count) { + // given + RankableObjectWithFields r = new RankableObjectWithFields(obj, count); + + // when + String strRepresentation = r.toString(); + + // then + assertThat(strRepresentation).contains(obj.toString()).contains("" + count); + } + + @Test + public void shouldReturnTheObject() { + // given + RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT, ANY_FIELD); + + // when + Object obj = r.getObject(); + + // then + assertThat(obj).isEqualTo(ANY_OBJECT); + } + + @Test + public void shouldReturnTheCount() { + // given + RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT, ANY_FIELD); + + // when + long count = r.getCount(); + + // then + assertThat(count).isEqualTo(ANY_COUNT); + } + + @DataProvider + public Object[][] fieldsData() { + return new Object[][]{ + { ANY_OBJECT, ANY_COUNT, new Object[]{ ANY_FIELD } }, + { "quux", 42L, new Object[]{ "one", "two", "three" } } + }; + } + + @Test(dataProvider = "fieldsData") + public void shouldReturnTheFields(Object obj, long count, Object[] fields) { + // given + 
RankableObjectWithFields r = new RankableObjectWithFields(obj, count, fields); + + // when + List<Object> actualFields = r.getFields(); + + // then + assertThat(actualFields).isEqualTo(Lists.newArrayList(fields)); + } + + @Test(expectedExceptions = UnsupportedOperationException.class) + public void fieldsShouldBeImmutable() { + // given + RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT, ANY_FIELD); + + // when + List<Object> fields = r.getFields(); + // try to modify the list, which should fail + fields.remove(0); + + // then (exception) + } + + @Test + public void shouldCreateRankableObjectFromTuple() { + // given + Tuple tuple = mock(Tuple.class); + List<Object> tupleValues = Lists.newArrayList(ANY_OBJECT, ANY_COUNT, ANY_FIELD); + when(tuple.getValues()).thenReturn(tupleValues); + + // when + RankableObjectWithFields r = RankableObjectWithFields.from(tuple); + + // then + assertThat(r.getObject()).isEqualTo(ANY_OBJECT); + assertThat(r.getCount()).isEqualTo(ANY_COUNT); + List<Object> fields = new ArrayList<>(); + fields.add(ANY_FIELD); + assertThat(r.getFields()).isEqualTo(fields); + } + + @DataProvider + public Object[][] copyData() { + return new Object[][]{ + { new RankableObjectWithFields("foo", 0) }, + { new RankableObjectWithFields("foo", 3, "someOtherField") }, + { new RankableObjectWithFields("foo", 0, "someField") } + }; + } + + // TODO: What would be a good test to ensure that RankableObjectWithFields is at least somewhat defensively copied? + // Note that Rankable#copy() is declared to return a Rankable, not a RankableObjectWithFields. + @Test(dataProvider = "copyData") + public void copyShouldReturnCopy(RankableObjectWithFields original) { + // given + + // when + Rankable copy = original.copy(); + + // then + assertThat(copy.getObject()).isEqualTo(original.getObject()); + assertThat(copy.getCount()).isEqualTo(original.getCount()); + } + +} diff --git a/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/RankingsTest.java b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/RankingsTest.java new file mode 100644 index 00000000000..206d45c77d4 --- /dev/null +++ b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/RankingsTest.java @@ -0,0 +1,407 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + + package org.apache.storm.starter.tools; + + import com.google.common.base.Throwables; + import com.google.common.collect.ImmutableList; + import com.google.common.collect.Lists; + import java.util.List; + import org.jmock.lib.concurrent.Blitzer; + import org.testng.annotations.DataProvider; + import org.testng.annotations.Test; + + import static org.fest.assertions.api.Assertions.assertThat; + + public class RankingsTest { + + private static final int ANY_TOPN = 42; + private static final Rankable ANY_RANKABLE = new RankableObjectWithFields("someObject", ANY_TOPN); + private static final Rankable ZERO = new RankableObjectWithFields("ZERO_COUNT", 0); + private static final Rankable A = new RankableObjectWithFields("A", 1); + private static final Rankable B = new RankableObjectWithFields("B", 2); + private static final Rankable C = new RankableObjectWithFields("C", 3); + private static final Rankable D = new RankableObjectWithFields("D", 4); + private static final Rankable E = new RankableObjectWithFields("E", 5); + private static final Rankable F = new RankableObjectWithFields("F", 6); + private static final Rankable G = new RankableObjectWithFields("G", 7); + private static final Rankable H = new RankableObjectWithFields("H", 8); + + @DataProvider + public Object[][] illegalTopNData() { + return new Object[][]{ { 0 }, { -1 }, { -2 }, { -10 } }; + } + + @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalTopNData") + public void constructorWithNegativeOrZeroTopNShouldThrowIAE(int topN) { + new Rankings(topN); + } + + @DataProvider + public Object[][] copyRankingsData() { + return new Object[][]{ + { 5, Lists.newArrayList(A, B, C) }, { 2, Lists.newArrayList(A, B, C, D) }, + { 1, Lists.newArrayList() }, { 1, Lists.newArrayList(A) }, { 1, Lists.newArrayList(A, B) } + }; + } + + @Test(dataProvider = "copyRankingsData") + public void copyConstructorShouldReturnCopy(int topN, List<Rankable> rankables) { + // given + Rankings rankings = new Rankings(topN); + for (Rankable r : rankables) { + rankings.updateWith(r); + } + + // when + Rankings copy = new Rankings(rankings); + + // then + assertThat(copy.maxSize()).isEqualTo(rankings.maxSize()); + assertThat(copy.getRankings()).isEqualTo(rankings.getRankings()); + } + + @DataProvider + public Object[][] defensiveCopyRankingsData() { + return new Object[][]{ + { 5, Lists.newArrayList(A, B, C), Lists.newArrayList(D) }, + { 2, Lists.newArrayList(A, B, C, D), Lists.newArrayList(E, F) }, + { 1, Lists.newArrayList(), Lists.newArrayList(A) }, + { 1, Lists.newArrayList(A), Lists.newArrayList(B) }, + { 1, Lists.newArrayList(ZERO), Lists.newArrayList(B) }, + { 1, Lists.newArrayList(ZERO), Lists.newArrayList() } + }; + } + + @Test(dataProvider = "defensiveCopyRankingsData") + public void copyConstructorShouldReturnDefensiveCopy(int topN, List<Rankable> rankables, List<Rankable> changes) { + // given + Rankings original = new Rankings(topN); + for (Rankable r : rankables) { + original.updateWith(r); + } + int expSize = original.size(); + List<Rankable> expRankings = original.getRankings(); + + // when + Rankings copy = new Rankings(original); + for (Rankable r : changes) { + copy.updateWith(r); + } + + // then + assertThat(original.size()).isEqualTo(expSize); + assertThat(original.getRankings()).isEqualTo(expRankings); + } + + @DataProvider + public Object[][] legalTopNData() { + return new Object[][]{ { 1 }, { 2 }, { 1000 }, { 1000000 } }; + } + + @Test(dataProvider = "legalTopNData") + public void constructorWithPositiveTopNShouldBeOk(int topN) { + // given/when + 
Rankings rankings = new Rankings(topN); + + // then + assertThat(rankings.maxSize()).isEqualTo(topN); + } + + @Test + public void shouldHaveDefaultConstructor() { + new Rankings(); + } + + @Test + public void defaultConstructorShouldSetPositiveTopN() { + // given/when + Rankings rankings = new Rankings(); + + // then + assertThat(rankings.maxSize()).isGreaterThan(0); + } + + @DataProvider + public Object[][] rankingsGrowData() { + return new Object[][]{ + { 2, Lists.newArrayList(new RankableObjectWithFields("A", 1), new RankableObjectWithFields("B", 2), new RankableObjectWithFields("C", 3)) }, + { 2, Lists.newArrayList(new RankableObjectWithFields("A", 1), new RankableObjectWithFields("B", 2), new RankableObjectWithFields("C", 3), new RankableObjectWithFields("D", 4)) } + }; + } + + @Test(dataProvider = "rankingsGrowData") + public void sizeOfRankingsShouldNotGrowBeyondTopN(int topN, List<Rankable> rankables) { + // sanity check of the provided test data + assertThat(rankables.size()).overridingErrorMessage( + "The supplied test data is not correct: the number of rankables <%d> should be greater than <%d>", + rankables.size(), topN).isGreaterThan(topN); + + // given + Rankings rankings = new Rankings(topN); + + // when + for (Rankable r : rankables) { + rankings.updateWith(r); + } + + // then + assertThat(rankings.size()).isLessThanOrEqualTo(rankings.maxSize()); + } + + @DataProvider + public Object[][] simulatedRankingsData() { + return new Object[][]{ + { Lists.newArrayList(A), Lists.newArrayList(A) }, + { Lists.newArrayList(B, D, A, C), Lists.newArrayList(D, C, B, A) }, + { Lists.newArrayList(B, F, A, C, D, E), Lists.newArrayList(F, E, D, C, B, A) }, + { Lists.newArrayList(G, B, F, A, C, D, E, H), Lists.newArrayList(H, G, F, E, D, C, B, A) } + }; + } + + @Test(dataProvider = "simulatedRankingsData") + public void shouldCorrectlyRankWhenUpdatedWithRankables(List<Rankable> unsorted, List<Rankable> expSorted) { + // given + Rankings rankings = new Rankings(unsorted.size()); + + // when + for (Rankable r : unsorted) { + rankings.updateWith(r); + } + + // then + assertThat(rankings.getRankings()).isEqualTo(expSorted); + } + + @Test(dataProvider = "simulatedRankingsData") + public void shouldCorrectlyRankWhenEmptyAndUpdatedWithOtherRankings(List<Rankable> unsorted, + List<Rankable> expSorted) { + // given + Rankings rankings = new Rankings(unsorted.size()); + Rankings otherRankings = new Rankings(rankings.maxSize()); + for (Rankable r : unsorted) { + otherRankings.updateWith(r); + } + + // when + rankings.updateWith(otherRankings); + + // then + assertThat(rankings.getRankings()).isEqualTo(expSorted); + } + + @Test(dataProvider = "simulatedRankingsData") + public void shouldCorrectlyRankWhenUpdatedWithEmptyOtherRankings(List<Rankable> unsorted, List<Rankable> expSorted) { + // given + Rankings rankings = new Rankings(unsorted.size()); + for (Rankable r : unsorted) { + rankings.updateWith(r); + } + Rankings emptyRankings = new Rankings(ANY_TOPN); + + // when + rankings.updateWith(emptyRankings); + + // then + assertThat(rankings.getRankings()).isEqualTo(expSorted); + } + + @DataProvider + public Object[][] simulatedRankingsAndOtherRankingsData() { + return new Object[][]{ + { Lists.newArrayList(A), Lists.newArrayList(A), Lists.newArrayList(A) }, + { Lists.newArrayList(A, C), Lists.newArrayList(B, D), Lists.newArrayList(D, C, B, A) }, + { Lists.newArrayList(B, F, A), Lists.newArrayList(C, D, E), Lists.newArrayList(F, E, D, C, B, A) }, + { Lists.newArrayList(G, B, F, A, C), Lists.newArrayList(D, E, H), Lists.newArrayList(H, G, F, E, D, 
C, B, A) } + }; + } + + @Test(dataProvider = "simulatedRankingsAndOtherRankingsData") + public void shouldCorrectlyRankWhenNotEmptyAndUpdatedWithOtherRankings(List<Rankable> unsorted, + List<Rankable> unsortedForOtherRankings, + List<Rankable> expSorted) { + // given + Rankings rankings = new Rankings(expSorted.size()); + for (Rankable r : unsorted) { + rankings.updateWith(r); + } + Rankings otherRankings = new Rankings(unsortedForOtherRankings.size()); + for (Rankable r : unsortedForOtherRankings) { + otherRankings.updateWith(r); + } + + // when + rankings.updateWith(otherRankings); + + // then + assertThat(rankings.getRankings()).isEqualTo(expSorted); + } + + @DataProvider + public Object[][] duplicatesData() { + Rankable A1 = new RankableObjectWithFields("A", 1); + Rankable A2 = new RankableObjectWithFields("A", 2); + Rankable A3 = new RankableObjectWithFields("A", 3); + return new Object[][]{ + { Lists.newArrayList(ANY_RANKABLE, ANY_RANKABLE, ANY_RANKABLE) }, + { Lists.newArrayList(A1, A2, A3) }, + }; + } + + @Test(dataProvider = "duplicatesData") + public void shouldNotRankDuplicateObjectsMoreThanOnce(List<Rankable> duplicates) { + // given + Rankings rankings = new Rankings(duplicates.size()); + + // when + for (Rankable r : duplicates) { + rankings.updateWith(r); + } + + // then + assertThat(rankings.size()).isEqualTo(1); + } + + @DataProvider + public Object[][] removeZeroRankingsData() { + return new Object[][]{ + { Lists.newArrayList(A, ZERO), Lists.newArrayList(A) }, + { Lists.newArrayList(A), Lists.newArrayList(A) }, + { Lists.newArrayList(ZERO, A), Lists.newArrayList(A) }, + { Lists.newArrayList(ZERO), Lists.newArrayList() }, + { Lists.newArrayList(ZERO, new RankableObjectWithFields("ZERO2", 0)), Lists.newArrayList() }, + { Lists.newArrayList(B, ZERO, new RankableObjectWithFields("ZERO2", 0), D, new RankableObjectWithFields("ZERO3", 0), new RankableObjectWithFields("ZERO4", 0), C), Lists.newArrayList(D, C, B) }, + { Lists.newArrayList(A, ZERO, B), Lists.newArrayList(B, A) } + }; + } + + @Test(dataProvider = "removeZeroRankingsData") + public void shouldRemoveZeroCounts(List<Rankable> unsorted, List<Rankable> expSorted) { + // given + Rankings rankings = new Rankings(unsorted.size()); + for (Rankable r : unsorted) { + rankings.updateWith(r); + } + + // when + rankings.pruneZeroCounts(); + + // then + assertThat(rankings.getRankings()).isEqualTo(expSorted); + } + + @Test + public void updatingWithNewRankablesShouldBeThreadSafe() throws InterruptedException { + // given + final List<Rankable> entries = ImmutableList.of(A, B, C, D); + final Rankings rankings = new Rankings(entries.size()); + + // We are capturing exceptions thrown in Blitzer's child threads into this data structure so that we can properly + // pass/fail this test. The reason is that Blitzer doesn't report exceptions, which is a known bug in Blitzer + // (JMOCK-263). See https://github.com/jmock-developers/jmock-library/issues/22 for more information. 
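+ // Note that Lists.newArrayList() returns a plain, non-thread-safe ArrayList, which is why every write to it + // from Blitzer's worker threads below is guarded by an explicit synchronized block.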
+ final List<Exception> exceptions = Lists.newArrayList(); + Blitzer blitzer = new Blitzer(1000); + + // when + blitzer.blitz(new Runnable() { + @Override + public void run() { + for (Rankable r : entries) { + try { + rankings.updateWith(r); + } catch (RuntimeException e) { + synchronized (exceptions) { + exceptions.add(e); + } + } + } + } + }); + blitzer.shutdown(); + + // then + if (!exceptions.isEmpty()) { + for (Exception e : exceptions) { + System.err.println(Throwables.getStackTraceAsString(e)); + } + } + assertThat(exceptions).isEmpty(); + } + + @Test(dataProvider = "copyRankingsData") + public void copyShouldReturnCopy(int topN, List<Rankable> rankables) { + // given + Rankings rankings = new Rankings(topN); + for (Rankable r : rankables) { + rankings.updateWith(r); + } + + // when + Rankings copy = rankings.copy(); + + // then + assertThat(copy.maxSize()).isEqualTo(rankings.maxSize()); + assertThat(copy.getRankings()).isEqualTo(rankings.getRankings()); + } + + @Test(dataProvider = "defensiveCopyRankingsData") + public void copyShouldReturnDefensiveCopy(int topN, List<Rankable> rankables, List<Rankable> changes) { + // given + Rankings original = new Rankings(topN); + for (Rankable r : rankables) { + original.updateWith(r); + } + int expSize = original.size(); + List<Rankable> expRankings = original.getRankings(); + + // when + Rankings copy = original.copy(); + for (Rankable r : changes) { + copy.updateWith(r); + } + copy.pruneZeroCounts(); + + // then + assertThat(original.size()).isEqualTo(expSize); + assertThat(original.getRankings()).isEqualTo(expRankings); + } + +} diff --git a/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/SlidingWindowCounterTest.java b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/SlidingWindowCounterTest.java new file mode 100644 index 00000000000..3bdb4ff25e6 --- /dev/null +++ b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/SlidingWindowCounterTest.java @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +package org.apache.storm.starter.tools; + +import java.util.Map; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import static org.fest.assertions.api.Assertions.assertThat; + +public class SlidingWindowCounterTest { + + private static final int ANY_WINDOW_LENGTH_IN_SLOTS = 2; + private static final Object ANY_OBJECT = "ANY_OBJECT"; + + @DataProvider + public Object[][] illegalWindowLengths() { + return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 }, { 1 } }; + } + + @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalWindowLengths") + public void lessThanTwoSlotsShouldThrowIAE(int windowLengthInSlots) { + new SlidingWindowCounter(windowLengthInSlots); + } + + @DataProvider + public Object[][] legalWindowLengths() { + return new Object[][]{ { 2 }, { 3 }, { 20 } }; + } + + @Test(dataProvider = "legalWindowLengths") + public void twoOrMoreSlotsShouldBeValid(int windowLengthInSlots) { + new SlidingWindowCounter(windowLengthInSlots); + } + + @Test + public void newInstanceShouldHaveEmptyCounts() { + // given + SlidingWindowCounter counter = new SlidingWindowCounter(ANY_WINDOW_LENGTH_IN_SLOTS); + + // when + Map counts = counter.getCountsThenAdvanceWindow(); + + // then + assertThat(counts).isEmpty(); + } + + @DataProvider + public Object[][] simulatedCounterIterations() { + return new Object[][]{ + { 2, new int[]{ 3, 2, 0, 0, 1, 0, 0, 0 }, new long[]{ 3, 5, 2, 0, 1, 1, 0, 0 } }, + { 3, new int[]{ 3, 2, 0, 0, 1, 0, 0, 0 }, new long[]{ 3, 5, 5, 2, 1, 1, 1, 0 } }, + { 4, new int[]{ 3, 2, 0, 0, 1, 0, 0, 0 }, new long[]{ 3, 5, 5, 5, 3, 1, 1, 1 } }, + { 5, new int[]{ 3, 2, 0, 0, 1, 0, 0, 0 }, new long[]{ 3, 5, 5, 5, 6, 3, 1, 1 } }, + { + 5, new int[]{ 3, 11, 5, 13, 7, 17, 0, 3, 50, 600, 7000 }, + new long[]{ 3, 14, 19, 32, 39, 53, 42, 40, 77, 670, 7653 } + }, + }; + } + + @Test(dataProvider = "simulatedCounterIterations") + public void testCounterWithSimulatedRuns(int windowLengthInSlots, int[] incrementsPerIteration, + long[] expCountsPerIteration) { + // given + SlidingWindowCounter counter = new SlidingWindowCounter(windowLengthInSlots); + int numIterations = incrementsPerIteration.length; + + for (int i = 0; i < numIterations; i++) { + int numIncrements = incrementsPerIteration[i]; + long expCounts = expCountsPerIteration[i]; + // Objects are absent if they were zero both this iteration + // and the last -- if only this one, we need to report zero. + boolean expAbsent = ((expCounts == 0) && ((i == 0) || (expCountsPerIteration[i - 1] == 0))); + + // given (for this iteration) + for (int j = 0; j < numIncrements; j++) { + counter.incrementCount(ANY_OBJECT); + } + + // when (for this iteration) + Map counts = counter.getCountsThenAdvanceWindow(); + + // then (for this iteration) + if (expAbsent) { + assertThat(counts).doesNotContainKey(ANY_OBJECT); + } else { + assertThat(counts.get(ANY_OBJECT)).isEqualTo(expCounts); + } + } + } + +} diff --git a/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/SlotBasedCounterTest.java b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/SlotBasedCounterTest.java new file mode 100644 index 00000000000..df2ab21d65c --- /dev/null +++ b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/SlotBasedCounterTest.java @@ -0,0 +1,175 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.starter.tools; + +import java.util.Map; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import static org.fest.assertions.api.Assertions.assertThat; + +public class SlotBasedCounterTest { + + private static final int ANY_NUM_SLOTS = 1; + private static final int ANY_SLOT = 0; + private static final Object ANY_OBJECT = "ANY_OBJECT"; + + @DataProvider + public Object[][] illegalNumSlotsData() { + return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } }; + } + + @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalNumSlotsData") + public void negativeOrZeroNumSlotsShouldThrowIAE(int numSlots) { + new SlotBasedCounter(numSlots); + } + + @DataProvider + public Object[][] legalNumSlotsData() { + return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } }; + } + + @Test(dataProvider = "legalNumSlotsData") + public void positiveNumSlotsShouldBeOk(int numSlots) { + new SlotBasedCounter(numSlots); + } + + @Test + public void newInstanceShouldHaveEmptyCounts() { + // given + SlotBasedCounter counter = new SlotBasedCounter(ANY_NUM_SLOTS); + + // when + Map counts = counter.getCounts(); + + // then + assertThat(counts).isEmpty(); + } + + @Test + public void shouldReturnNonEmptyCountsWhenAtLeastOneObjectWasCounted() { + // given + SlotBasedCounter counter = new SlotBasedCounter(ANY_NUM_SLOTS); + counter.incrementCount(ANY_OBJECT, ANY_SLOT); + + // when + Map counts = counter.getCounts(); + + // then + assertThat(counts).isNotEmpty(); + + // additional tests that go beyond what this test is primarily about + assertThat(counts.size()).isEqualTo(1); + assertThat(counts.get(ANY_OBJECT)).isEqualTo(1); + } + + @DataProvider + public Object[][] incrementCountData() { + return new Object[][]{ { new String[]{ "foo", "bar" }, new int[]{ 3, 2 } } }; + } + + @Test(dataProvider = "incrementCountData") + public void shouldIncrementCount(Object[] objects, int[] expCounts) { + // given + SlotBasedCounter counter = new SlotBasedCounter(ANY_NUM_SLOTS); + + // when + for (int i = 0; i < objects.length; i++) { + Object obj = objects[i]; + int numIncrements = expCounts[i]; + for (int j = 0; j < numIncrements; j++) { + counter.incrementCount(obj, ANY_SLOT); + } + } + + // then + for (int i = 0; i < objects.length; i++) { + assertThat(counter.getCount(objects[i], ANY_SLOT)).isEqualTo(expCounts[i]); + } + assertThat(counter.getCount("nonexistentObject", ANY_SLOT)).isEqualTo(0); + } + + @Test + public void shouldReturnZeroForNonexistentObject() { + // given + SlotBasedCounter counter = new SlotBasedCounter(ANY_NUM_SLOTS); + + // when + counter.incrementCount("somethingElse", ANY_SLOT); + + // then + assertThat(counter.getCount("nonexistentObject", ANY_SLOT)).isEqualTo(0); + } + + @Test + public void shouldIncrementCountOnlyOneSlotAtATime() { + // given + int numSlots = 3; + Object obj = Long.valueOf(10); + SlotBasedCounter counter = new SlotBasedCounter(numSlots); + + // 
when (empty) + // then + assertThat(counter.getCount(obj, 0)).isEqualTo(0); + assertThat(counter.getCount(obj, 1)).isEqualTo(0); + assertThat(counter.getCount(obj, 2)).isEqualTo(0); + + // when + counter.incrementCount(obj, 1); + + // then + assertThat(counter.getCount(obj, 0)).isEqualTo(0); + assertThat(counter.getCount(obj, 1)).isEqualTo(1); + assertThat(counter.getCount(obj, 2)).isEqualTo(0); + } + + @Test + public void wipeSlotShouldSetAllCountsInSlotToZero() { + // given + SlotBasedCounter counter = new SlotBasedCounter(ANY_NUM_SLOTS); + Object countWasOne = "countWasOne"; + Object countWasThree = "countWasThree"; + counter.incrementCount(countWasOne, ANY_SLOT); + counter.incrementCount(countWasThree, ANY_SLOT); + counter.incrementCount(countWasThree, ANY_SLOT); + counter.incrementCount(countWasThree, ANY_SLOT); + + // when + counter.wipeSlot(ANY_SLOT); + + // then + assertThat(counter.getCount(countWasOne, ANY_SLOT)).isEqualTo(0); + assertThat(counter.getCount(countWasThree, ANY_SLOT)).isEqualTo(0); + } + + @Test + public void wipeZerosShouldRemoveAnyObjectsWithZeroTotalCount() { + // given + SlotBasedCounter counter = new SlotBasedCounter(2); + int wipeSlot = 0; + int otherSlot = 1; + Object willBeRemoved = "willBeRemoved"; + Object willContinueToBeTracked = "willContinueToBeTracked"; + counter.incrementCount(willBeRemoved, wipeSlot); + counter.incrementCount(willContinueToBeTracked, wipeSlot); + counter.incrementCount(willContinueToBeTracked, otherSlot); + + // when + counter.wipeSlot(wipeSlot); + counter.wipeZeros(); + + // then + assertThat(counter.getCounts()).doesNotContainKey(willBeRemoved); + assertThat(counter.getCounts()).containsKey(willContinueToBeTracked); + } +} diff --git a/examples/storm-starter/test/jvm/storm/starter/bolt/IntermediateRankingsBoltTest.java b/examples/storm-starter/test/jvm/storm/starter/bolt/IntermediateRankingsBoltTest.java deleted file mode 100644 index c296a899e4d..00000000000 --- a/examples/storm-starter/test/jvm/storm/starter/bolt/IntermediateRankingsBoltTest.java +++ /dev/null @@ -1,146 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package storm.starter.bolt; - -import backtype.storm.Config; -import backtype.storm.topology.BasicOutputCollector; -import backtype.storm.topology.OutputFieldsDeclarer; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Tuple; -import backtype.storm.tuple.Values; -import com.google.common.collect.Lists; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; -import storm.starter.tools.MockTupleHelpers; - -import java.util.Map; - -import static org.fest.assertions.api.Assertions.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.*; - -public class IntermediateRankingsBoltTest { - - private static final String ANY_NON_SYSTEM_COMPONENT_ID = "irrelevant_component_id"; - private static final String ANY_NON_SYSTEM_STREAM_ID = "irrelevant_stream_id"; - private static final Object ANY_OBJECT = new Object(); - private static final int ANY_TOPN = 10; - private static final long ANY_COUNT = 42; - - private Tuple mockRankableTuple(Object obj, long count) { - Tuple tuple = MockTupleHelpers.mockTuple(ANY_NON_SYSTEM_COMPONENT_ID, ANY_NON_SYSTEM_STREAM_ID); - when(tuple.getValues()).thenReturn(Lists.newArrayList(ANY_OBJECT, ANY_COUNT)); - return tuple; - } - - @DataProvider - public Object[][] illegalTopN() { - return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } }; - } - - @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalTopN") - public void negativeOrZeroTopNShouldThrowIAE(int topN) { - new IntermediateRankingsBolt(topN); - } - - @DataProvider - public Object[][] illegalEmitFrequency() { - return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } }; - } - - @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalEmitFrequency") - public void negativeOrZeroEmitFrequencyShouldThrowIAE(int emitFrequencyInSeconds) { - new IntermediateRankingsBolt(ANY_TOPN, emitFrequencyInSeconds); - } - - @DataProvider - public Object[][] legalTopN() { - return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } }; - } - - @Test(dataProvider = "legalTopN") - public void positiveTopNShouldBeOk(int topN) { - new IntermediateRankingsBolt(topN); - } - - @DataProvider - public Object[][] legalEmitFrequency() { - return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } }; - } - - @Test(dataProvider = "legalEmitFrequency") - public void positiveEmitFrequencyShouldBeOk(int emitFrequencyInSeconds) { - new IntermediateRankingsBolt(ANY_TOPN, emitFrequencyInSeconds); - } - - @Test - public void shouldEmitSomethingIfTickTupleIsReceived() { - // given - Tuple tickTuple = MockTupleHelpers.mockTickTuple(); - BasicOutputCollector collector = mock(BasicOutputCollector.class); - IntermediateRankingsBolt bolt = new IntermediateRankingsBolt(); - - // when - bolt.execute(tickTuple, collector); - - // then - // verifyZeroInteractions(collector); - verify(collector).emit(any(Values.class)); - } - - @Test - public void shouldEmitNothingIfNormalTupleIsReceived() { - // given - Tuple normalTuple = mockRankableTuple(ANY_OBJECT, ANY_COUNT); - BasicOutputCollector collector = mock(BasicOutputCollector.class); - IntermediateRankingsBolt bolt = new IntermediateRankingsBolt(); - - // when - bolt.execute(normalTuple, collector); - - // then - verifyZeroInteractions(collector); - } - - @Test - public void shouldDeclareOutputFields() { - // given - OutputFieldsDeclarer declarer = mock(OutputFieldsDeclarer.class); - IntermediateRankingsBolt bolt = new IntermediateRankingsBolt(); - - // when - 
bolt.declareOutputFields(declarer); - - // then - verify(declarer, times(1)).declare(any(Fields.class)); - } - - @Test - public void shouldSetTickTupleFrequencyInComponentConfigurationToNonZeroValue() { - // given - IntermediateRankingsBolt bolt = new IntermediateRankingsBolt(); - - // when - Map componentConfig = bolt.getComponentConfiguration(); - - // then - assertThat(componentConfig).containsKey(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS); - Integer emitFrequencyInSeconds = (Integer) componentConfig.get(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS); - assertThat(emitFrequencyInSeconds).isGreaterThan(0); - } -} diff --git a/examples/storm-starter/test/jvm/storm/starter/bolt/RollingCountBoltTest.java b/examples/storm-starter/test/jvm/storm/starter/bolt/RollingCountBoltTest.java deleted file mode 100644 index bc31ba06d4d..00000000000 --- a/examples/storm-starter/test/jvm/storm/starter/bolt/RollingCountBoltTest.java +++ /dev/null @@ -1,113 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter.bolt; - -import backtype.storm.Config; -import backtype.storm.task.OutputCollector; -import backtype.storm.task.TopologyContext; -import backtype.storm.topology.OutputFieldsDeclarer; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Tuple; -import backtype.storm.tuple.Values; -import org.testng.annotations.Test; -import storm.starter.tools.MockTupleHelpers; - -import java.util.Map; - -import static org.fest.assertions.api.Assertions.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.*; - -public class RollingCountBoltTest { - - private static final String ANY_NON_SYSTEM_COMPONENT_ID = "irrelevant_component_id"; - private static final String ANY_NON_SYSTEM_STREAM_ID = "irrelevant_stream_id"; - - private Tuple mockNormalTuple(Object obj) { - Tuple tuple = MockTupleHelpers.mockTuple(ANY_NON_SYSTEM_COMPONENT_ID, ANY_NON_SYSTEM_STREAM_ID); - when(tuple.getValue(0)).thenReturn(obj); - return tuple; - } - - @SuppressWarnings("rawtypes") - @Test - public void shouldEmitNothingIfNoObjectHasBeenCountedYetAndTickTupleIsReceived() { - // given - Tuple tickTuple = MockTupleHelpers.mockTickTuple(); - RollingCountBolt bolt = new RollingCountBolt(); - Map conf = mock(Map.class); - TopologyContext context = mock(TopologyContext.class); - OutputCollector collector = mock(OutputCollector.class); - bolt.prepare(conf, context, collector); - - // when - bolt.execute(tickTuple); - - // then - verifyZeroInteractions(collector); - } - - @SuppressWarnings("rawtypes") - @Test - public void shouldEmitSomethingIfAtLeastOneObjectWasCountedAndTickTupleIsReceived() { - // given - Tuple normalTuple = mockNormalTuple(new Object()); - Tuple tickTuple = MockTupleHelpers.mockTickTuple(); - - RollingCountBolt bolt = new 
RollingCountBolt(); - Map conf = mock(Map.class); - TopologyContext context = mock(TopologyContext.class); - OutputCollector collector = mock(OutputCollector.class); - bolt.prepare(conf, context, collector); - - // when - bolt.execute(normalTuple); - bolt.execute(tickTuple); - - // then - verify(collector).emit(any(Values.class)); - } - - @Test - public void shouldDeclareOutputFields() { - // given - OutputFieldsDeclarer declarer = mock(OutputFieldsDeclarer.class); - RollingCountBolt bolt = new RollingCountBolt(); - - // when - bolt.declareOutputFields(declarer); - - // then - verify(declarer, times(1)).declare(any(Fields.class)); - - } - - @Test - public void shouldSetTickTupleFrequencyInComponentConfigurationToNonZeroValue() { - // given - RollingCountBolt bolt = new RollingCountBolt(); - - // when - Map componentConfig = bolt.getComponentConfiguration(); - - // then - assertThat(componentConfig).containsKey(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS); - Integer emitFrequencyInSeconds = (Integer) componentConfig.get(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS); - assertThat(emitFrequencyInSeconds).isGreaterThan(0); - } -} diff --git a/examples/storm-starter/test/jvm/storm/starter/bolt/TotalRankingsBoltTest.java b/examples/storm-starter/test/jvm/storm/starter/bolt/TotalRankingsBoltTest.java deleted file mode 100644 index 49e3d679f27..00000000000 --- a/examples/storm-starter/test/jvm/storm/starter/bolt/TotalRankingsBoltTest.java +++ /dev/null @@ -1,147 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package storm.starter.bolt; - -import backtype.storm.Config; -import backtype.storm.topology.BasicOutputCollector; -import backtype.storm.topology.OutputFieldsDeclarer; -import backtype.storm.tuple.Fields; -import backtype.storm.tuple.Tuple; -import backtype.storm.tuple.Values; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; -import storm.starter.tools.MockTupleHelpers; -import storm.starter.tools.Rankings; - -import java.util.Map; - -import static org.fest.assertions.api.Assertions.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.*; - -public class TotalRankingsBoltTest { - - private static final String ANY_NON_SYSTEM_COMPONENT_ID = "irrelevant_component_id"; - private static final String ANY_NON_SYSTEM_STREAM_ID = "irrelevant_stream_id"; - private static final Object ANY_OBJECT = new Object(); - private static final int ANY_TOPN = 10; - private static final long ANY_COUNT = 42; - - private Tuple mockRankingsTuple(Object obj, long count) { - Tuple tuple = MockTupleHelpers.mockTuple(ANY_NON_SYSTEM_COMPONENT_ID, ANY_NON_SYSTEM_STREAM_ID); - Rankings rankings = mock(Rankings.class); - when(tuple.getValue(0)).thenReturn(rankings); - return tuple; - } - - @DataProvider - public Object[][] illegalTopN() { - return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } }; - } - - @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalTopN") - public void negativeOrZeroTopNShouldThrowIAE(int topN) { - new TotalRankingsBolt(topN); - } - - @DataProvider - public Object[][] illegalEmitFrequency() { - return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } }; - } - - @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalEmitFrequency") - public void negativeOrZeroEmitFrequencyShouldThrowIAE(int emitFrequencyInSeconds) { - new TotalRankingsBolt(ANY_TOPN, emitFrequencyInSeconds); - } - - @DataProvider - public Object[][] legalTopN() { - return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } }; - } - - @Test(dataProvider = "legalTopN") - public void positiveTopNShouldBeOk(int topN) { - new TotalRankingsBolt(topN); - } - - @DataProvider - public Object[][] legalEmitFrequency() { - return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } }; - } - - @Test(dataProvider = "legalEmitFrequency") - public void positiveEmitFrequencyShouldBeOk(int emitFrequencyInSeconds) { - new TotalRankingsBolt(ANY_TOPN, emitFrequencyInSeconds); - } - - @Test - public void shouldEmitSomethingIfTickTupleIsReceived() { - // given - Tuple tickTuple = MockTupleHelpers.mockTickTuple(); - BasicOutputCollector collector = mock(BasicOutputCollector.class); - TotalRankingsBolt bolt = new TotalRankingsBolt(); - - // when - bolt.execute(tickTuple, collector); - - // then - // verifyZeroInteractions(collector); - verify(collector).emit(any(Values.class)); - } - - @Test - public void shouldEmitNothingIfNormalTupleIsReceived() { - // given - Tuple normalTuple = mockRankingsTuple(ANY_OBJECT, ANY_COUNT); - BasicOutputCollector collector = mock(BasicOutputCollector.class); - TotalRankingsBolt bolt = new TotalRankingsBolt(); - - // when - bolt.execute(normalTuple, collector); - - // then - verifyZeroInteractions(collector); - } - - @Test - public void shouldDeclareOutputFields() { - // given - OutputFieldsDeclarer declarer = mock(OutputFieldsDeclarer.class); - TotalRankingsBolt bolt = new TotalRankingsBolt(); - - // when - bolt.declareOutputFields(declarer); - - // then - verify(declarer, 
times(1)).declare(any(Fields.class)); - } - - @Test - public void shouldSetTickTupleFrequencyInComponentConfigurationToNonZeroValue() { - // given - TotalRankingsBolt bolt = new TotalRankingsBolt(); - - // when - Map componentConfig = bolt.getComponentConfiguration(); - - // then - assertThat(componentConfig).containsKey(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS); - Integer emitFrequencyInSeconds = (Integer) componentConfig.get(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS); - assertThat(emitFrequencyInSeconds).isGreaterThan(0); - } -} diff --git a/examples/storm-starter/test/jvm/storm/starter/tools/MockTupleHelpers.java b/examples/storm-starter/test/jvm/storm/starter/tools/MockTupleHelpers.java deleted file mode 100644 index b253350ef4d..00000000000 --- a/examples/storm-starter/test/jvm/storm/starter/tools/MockTupleHelpers.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter.tools; - -import backtype.storm.Constants; -import backtype.storm.tuple.Tuple; - -import static org.mockito.Mockito.*; - -public final class MockTupleHelpers { - - private MockTupleHelpers() { - } - - public static Tuple mockTickTuple() { - return mockTuple(Constants.SYSTEM_COMPONENT_ID, Constants.SYSTEM_TICK_STREAM_ID); - } - - public static Tuple mockTuple(String componentId, String streamId) { - Tuple tuple = mock(Tuple.class); - when(tuple.getSourceComponent()).thenReturn(componentId); - when(tuple.getSourceStreamId()).thenReturn(streamId); - return tuple; - } -} diff --git a/examples/storm-starter/test/jvm/storm/starter/tools/NthLastModifiedTimeTrackerTest.java b/examples/storm-starter/test/jvm/storm/starter/tools/NthLastModifiedTimeTrackerTest.java deleted file mode 100644 index fe4d987d87b..00000000000 --- a/examples/storm-starter/test/jvm/storm/starter/tools/NthLastModifiedTimeTrackerTest.java +++ /dev/null @@ -1,125 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package storm.starter.tools; - -import backtype.storm.utils.Time; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; - -import static org.fest.assertions.api.Assertions.assertThat; - -public class NthLastModifiedTimeTrackerTest { - - private static final int ANY_NUM_TIMES_TO_TRACK = 3; - private static final int MILLIS_IN_SEC = 1000; - - @DataProvider - public Object[][] illegalNumTimesData() { - return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } }; - } - - @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalNumTimesData") - public void negativeOrZeroNumTimesToTrackShouldThrowIAE(int numTimesToTrack) { - new NthLastModifiedTimeTracker(numTimesToTrack); - } - - @DataProvider - public Object[][] legalNumTimesData() { - return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } }; - } - - @Test(dataProvider = "legalNumTimesData") - public void positiveNumTimesToTrackShouldBeOk(int numTimesToTrack) { - new NthLastModifiedTimeTracker(numTimesToTrack); - } - - @DataProvider - public Object[][] whenNotYetMarkedAsModifiedData() { - return new Object[][]{ { 0 }, { 1 }, { 2 }, { 3 }, { 4 }, { 5 }, { 8 }, { 10 } }; - } - - @Test(dataProvider = "whenNotYetMarkedAsModifiedData") - public void shouldReturnCorrectModifiedTimeEvenWhenNotYetMarkedAsModified(int secondsToAdvance) { - // given - Time.startSimulating(); - NthLastModifiedTimeTracker tracker = new NthLastModifiedTimeTracker(ANY_NUM_TIMES_TO_TRACK); - - // when - advanceSimulatedTimeBy(secondsToAdvance); - int seconds = tracker.secondsSinceOldestModification(); - - // then - assertThat(seconds).isEqualTo(secondsToAdvance); - - // cleanup - Time.stopSimulating(); - } - - @DataProvider - public Object[][] simulatedTrackerIterations() { - return new Object[][]{ { 1, new int[]{ 0, 1 }, new int[]{ 0, 0 } }, { 1, new int[]{ 0, 2 }, new int[]{ 0, 0 } }, - { 2, new int[]{ 2, 2 }, new int[]{ 2, 2 } }, { 2, new int[]{ 0, 4 }, new int[]{ 0, 4 } }, - { 1, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 0, 0, 0, 0, 0, 0, 0 } }, - { 1, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 0, 0, 0, 0, 0, 0, 0 } }, - { 2, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 1, 1, 1, 1, 1, 1 } }, - { 2, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 2, 2, 2, 2, 2, 2 } }, - { 2, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 2, 3, 4, 5, 6, 7 } }, - { 3, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 2, 2, 2, 2, 2, 2 } }, - { 3, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 3, 5, 7, 9, 11, 13 } }, - { 3, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 4, 4, 4, 4, 4, 4 } }, - { 4, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 2, 3, 3, 3, 3, 3 } }, - { 4, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 3, 6, 9, 12, 15, 18 } }, - { 4, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 4, 6, 6, 6, 6, 6 } }, - { 5, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 2, 3, 4, 4, 4, 4 } }, - { 5, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 3, 6, 10, 14, 18, 22 } }, - { 5, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 4, 6, 8, 8, 8, 8 } }, - { 6, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 2, 3, 4, 5, 5, 5 } }, - { 6, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 3, 6, 10, 15, 20, 25 } }, - { 6, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 4, 6, 8, 10, 10, 10 } }, - { 3, new int[]{ 1, 2, 3 }, new int[]{ 1, 3, 5 } } }; - } - - @Test(dataProvider = "simulatedTrackerIterations") - public void shouldReturnCorrectModifiedTimeWhenMarkedAsModified(int numTimesToTrack, - int[] secondsToAdvancePerIteration, int[] 
expLastModifiedTimes) { - // given - Time.startSimulating(); - NthLastModifiedTimeTracker tracker = new NthLastModifiedTimeTracker(numTimesToTrack); - - int[] modifiedTimes = new int[expLastModifiedTimes.length]; - - // when - int i = 0; - for (int secondsToAdvance : secondsToAdvancePerIteration) { - advanceSimulatedTimeBy(secondsToAdvance); - tracker.markAsModified(); - modifiedTimes[i] = tracker.secondsSinceOldestModification(); - i++; - } - - // then - assertThat(modifiedTimes).isEqualTo(expLastModifiedTimes); - - // cleanup - Time.stopSimulating(); - } - - private void advanceSimulatedTimeBy(int seconds) { - Time.advanceTime(seconds * MILLIS_IN_SEC); - } -} diff --git a/examples/storm-starter/test/jvm/storm/starter/tools/RankableObjectWithFieldsTest.java b/examples/storm-starter/test/jvm/storm/starter/tools/RankableObjectWithFieldsTest.java deleted file mode 100644 index e83f9220941..00000000000 --- a/examples/storm-starter/test/jvm/storm/starter/tools/RankableObjectWithFieldsTest.java +++ /dev/null @@ -1,252 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package storm.starter.tools; - -import backtype.storm.tuple.Tuple; -import com.google.common.collect.Lists; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; - -import java.util.ArrayList; -import java.util.List; - -import static org.fest.assertions.api.Assertions.assertThat; -import static org.mockito.Mockito.*; -import static org.testng.Assert.assertFalse; -import static org.testng.Assert.assertTrue; - -public class RankableObjectWithFieldsTest { - - private static final Object ANY_OBJECT = new Object(); - private static final long ANY_COUNT = 271; - private static final String ANY_FIELD = "someAdditionalField"; - private static final int GREATER_THAN = 1; - private static final int EQUAL_TO = 0; - private static final int SMALLER_THAN = -1; - - @Test(expectedExceptions = IllegalArgumentException.class) - public void constructorWithNullObjectAndNoFieldsShouldThrowIAE() { - new RankableObjectWithFields(null, ANY_COUNT); - } - - @Test(expectedExceptions = IllegalArgumentException.class) - public void constructorWithNullObjectAndFieldsShouldThrowIAE() { - Object someAdditionalField = new Object(); - new RankableObjectWithFields(null, ANY_COUNT, someAdditionalField); - } - - @Test(expectedExceptions = IllegalArgumentException.class) - public void constructorWithNegativeCountAndNoFieldsShouldThrowIAE() { - new RankableObjectWithFields(ANY_OBJECT, -1); - } - - @Test(expectedExceptions = IllegalArgumentException.class) - public void constructorWithNegativeCountAndFieldsShouldThrowIAE() { - Object someAdditionalField = new Object(); - new RankableObjectWithFields(ANY_OBJECT, -1, someAdditionalField); - } - - @Test - public void shouldBeEqualToItself() { - RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT); - assertThat(r).isEqualTo(r); - } - - @DataProvider - public Object[][] otherClassesData() { - return new Object[][]{ { new String("foo") }, { new Object() }, { Integer.valueOf(4) }, { Lists.newArrayList(7, 8, - 9) } }; - } - - @Test(dataProvider = "otherClassesData") - public void shouldNotBeEqualToInstancesOfOtherClasses(Object notARankable) { - RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT); - assertFalse(r.equals(notARankable), r + " is equal to " + notARankable + " but it should not be"); - } - - @DataProvider - public Object[][] falseDuplicatesData() { - return new Object[][]{ { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 1) }, - { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("Foo", 1) }, - { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("FOO", 1) }, - { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("bar", 1) }, - { new RankableObjectWithFields("", 0), new RankableObjectWithFields("", 1) }, { new RankableObjectWithFields("", - 1), new RankableObjectWithFields("bar", 1) } }; - } - - @Test(dataProvider = "falseDuplicatesData") - public void shouldNotBeEqualToFalseDuplicates(RankableObjectWithFields r, RankableObjectWithFields falseDuplicate) { - assertFalse(r.equals(falseDuplicate), r + " is equal to " + falseDuplicate + " but it should not be"); - } - - @Test(dataProvider = "falseDuplicatesData") - public void shouldHaveDifferentHashCodeThanFalseDuplicates(RankableObjectWithFields r, - RankableObjectWithFields falseDuplicate) { - assertThat(r.hashCode()).isNotEqualTo(falseDuplicate.hashCode()); - } - - @DataProvider - public Object[][] trueDuplicatesData() { - return new Object[][]{ { new 
RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 0) }, - { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 0, "someOtherField") }, - { new RankableObjectWithFields("foo", 0, "someField"), new RankableObjectWithFields("foo", 0, - "someOtherField") } }; - } - - @Test(dataProvider = "trueDuplicatesData") - public void shouldBeEqualToTrueDuplicates(RankableObjectWithFields r, RankableObjectWithFields trueDuplicate) { - assertTrue(r.equals(trueDuplicate), r + " is not equal to " + trueDuplicate + " but it should be"); - } - - @Test(dataProvider = "trueDuplicatesData") - public void shouldHaveSameHashCodeAsTrueDuplicates(RankableObjectWithFields r, - RankableObjectWithFields trueDuplicate) { - assertThat(r.hashCode()).isEqualTo(trueDuplicate.hashCode()); - } - - @DataProvider - public Object[][] compareToData() { - return new Object[][]{ { new RankableObjectWithFields("foo", 1000), new RankableObjectWithFields("foo", 0), - GREATER_THAN }, { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("foo", 0), - GREATER_THAN }, { new RankableObjectWithFields("foo", 1000), new RankableObjectWithFields("bar", 0), - GREATER_THAN }, { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("bar", 0), - GREATER_THAN }, { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 0), EQUAL_TO }, - { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("bar", 0), EQUAL_TO }, - { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 1000), SMALLER_THAN }, - { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 1), SMALLER_THAN }, - { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("bar", 1), SMALLER_THAN }, - { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("bar", 1000), SMALLER_THAN }, }; - } - - @Test(dataProvider = "compareToData") - public void verifyCompareTo(RankableObjectWithFields first, RankableObjectWithFields second, int expCompareToValue) { - assertThat(first.compareTo(second)).isEqualTo(expCompareToValue); - } - - @DataProvider - public Object[][] toStringData() { - return new Object[][]{ { new String("foo"), 0L }, { new String("BAR"), 8L } }; - } - - @Test(dataProvider = "toStringData") - public void toStringShouldContainStringRepresentationsOfObjectAndCount(Object obj, long count) { - // given - RankableObjectWithFields r = new RankableObjectWithFields(obj, count); - - // when - String strRepresentation = r.toString(); - - // then - assertThat(strRepresentation).contains(obj.toString()).contains("" + count); - } - - @Test - public void shouldReturnTheObject() { - // given - RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT, ANY_FIELD); - - // when - Object obj = r.getObject(); - - // then - assertThat(obj).isEqualTo(ANY_OBJECT); - } - - @Test - public void shouldReturnTheCount() { - // given - RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT, ANY_FIELD); - - // when - long count = r.getCount(); - - // then - assertThat(count).isEqualTo(ANY_COUNT); - } - - @DataProvider - public Object[][] fieldsData() { - return new Object[][]{ { ANY_OBJECT, ANY_COUNT, new Object[]{ ANY_FIELD } }, - { "quux", 42L, new Object[]{ "one", "two", "three" } } }; - } - - @Test(dataProvider = "fieldsData") - public void shouldReturnTheFields(Object obj, long count, Object[] fields) { - // given - RankableObjectWithFields r = new RankableObjectWithFields(obj, 
count, fields); - - // when - List actualFields = r.getFields(); - - // then - assertThat(actualFields).isEqualTo(Lists.newArrayList(fields)); - } - - @Test(expectedExceptions = UnsupportedOperationException.class) - public void fieldsShouldBeImmutable() { - // given - RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT, ANY_FIELD); - - // when - List fields = r.getFields(); - // try to modify the list, which should fail - fields.remove(0); - - // then (exception) - } - - @Test - public void shouldCreateRankableObjectFromTuple() { - // given - Tuple tuple = mock(Tuple.class); - List tupleValues = Lists.newArrayList(ANY_OBJECT, ANY_COUNT, ANY_FIELD); - when(tuple.getValues()).thenReturn(tupleValues); - - // when - RankableObjectWithFields r = RankableObjectWithFields.from(tuple); - - // then - assertThat(r.getObject()).isEqualTo(ANY_OBJECT); - assertThat(r.getCount()).isEqualTo(ANY_COUNT); - List fields = new ArrayList(); - fields.add(ANY_FIELD); - assertThat(r.getFields()).isEqualTo(fields); - - } - - @DataProvider - public Object[][] copyData() { - return new Object[][]{ { new RankableObjectWithFields("foo", 0) }, { new RankableObjectWithFields("foo", 3, - "someOtherField") }, { new RankableObjectWithFields("foo", 0, "someField") } }; - } - - // TODO: What would be a good test to ensure that RankableObjectWithFields is at least somewhat defensively copied? - // The contract of Rankable#copy() returns a Rankable value, not a RankableObjectWithFields. - @Test(dataProvider = "copyData") - public void copyShouldReturnCopy(RankableObjectWithFields original) { - // given - - // when - Rankable copy = original.copy(); - - // then - assertThat(copy.getObject()).isEqualTo(original.getObject()); - assertThat(copy.getCount()).isEqualTo(original.getCount()); - } - -} diff --git a/examples/storm-starter/test/jvm/storm/starter/tools/RankingsTest.java b/examples/storm-starter/test/jvm/storm/starter/tools/RankingsTest.java deleted file mode 100644 index cab02cbe111..00000000000 --- a/examples/storm-starter/test/jvm/storm/starter/tools/RankingsTest.java +++ /dev/null @@ -1,368 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package storm.starter.tools; - -import com.google.common.base.Throwables; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; -import org.jmock.lib.concurrent.Blitzer; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; - -import java.util.List; - -import static org.fest.assertions.api.Assertions.assertThat; - -public class RankingsTest { - - private static final int ANY_TOPN = 42; - private static final Rankable ANY_RANKABLE = new RankableObjectWithFields("someObject", ANY_TOPN); - private static final Rankable ZERO = new RankableObjectWithFields("ZERO_COUNT", 0); - private static final Rankable A = new RankableObjectWithFields("A", 1); - private static final Rankable B = new RankableObjectWithFields("B", 2); - private static final Rankable C = new RankableObjectWithFields("C", 3); - private static final Rankable D = new RankableObjectWithFields("D", 4); - private static final Rankable E = new RankableObjectWithFields("E", 5); - private static final Rankable F = new RankableObjectWithFields("F", 6); - private static final Rankable G = new RankableObjectWithFields("G", 7); - private static final Rankable H = new RankableObjectWithFields("H", 8); - - @DataProvider - public Object[][] illegalTopNData() { - return new Object[][]{ { 0 }, { -1 }, { -2 }, { -10 } }; - } - - @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalTopNData") - public void constructorWithNegativeOrZeroTopNShouldThrowIAE(int topN) { - new Rankings(topN); - } - - @DataProvider - public Object[][] copyRankingsData() { - return new Object[][]{ { 5, Lists.newArrayList(A, B, C) }, { 2, Lists.newArrayList(A, B, C, D) }, - { 1, Lists.newArrayList() }, { 1, Lists.newArrayList(A) }, { 1, Lists.newArrayList(A, B) } }; - } - - @Test(dataProvider = "copyRankingsData") - public void copyConstructorShouldReturnCopy(int topN, List rankables) { - // given - Rankings rankings = new Rankings(topN); - for (Rankable r : rankables) { - rankings.updateWith(r); - } - - // when - Rankings copy = new Rankings(rankings); - - // then - assertThat(copy.maxSize()).isEqualTo(rankings.maxSize()); - assertThat(copy.getRankings()).isEqualTo(rankings.getRankings()); - } - - @DataProvider - public Object[][] defensiveCopyRankingsData() { - return new Object[][]{ { 5, Lists.newArrayList(A, B, C), Lists.newArrayList(D) }, { 2, Lists.newArrayList(A, B, C, - D), Lists.newArrayList(E, F) }, { 1, Lists.newArrayList(), Lists.newArrayList(A) }, { 1, Lists.newArrayList(A), - Lists.newArrayList(B) }, { 1, Lists.newArrayList(ZERO), Lists.newArrayList(B) }, { 1, Lists.newArrayList(ZERO), - Lists.newArrayList() } }; - } - - @Test(dataProvider = "defensiveCopyRankingsData") - public void copyConstructorShouldReturnDefensiveCopy(int topN, List rankables, List changes) { - // given - Rankings original = new Rankings(topN); - for (Rankable r : rankables) { - original.updateWith(r); - } - int expSize = original.size(); - List expRankings = original.getRankings(); - - // when - Rankings copy = new Rankings(original); - for (Rankable r : changes) { - copy.updateWith(r); - } - - // then - assertThat(original.size()).isEqualTo(expSize); - assertThat(original.getRankings()).isEqualTo(expRankings); - } - - @DataProvider - public Object[][] legalTopNData() { - return new Object[][]{ { 1 }, { 2 }, { 1000 }, { 1000000 } }; - } - - @Test(dataProvider = "legalTopNData") - public void constructorWithPositiveTopNShouldBeOk(int topN) { - // given/when - Rankings rankings = new 
Rankings(topN); - - // then - assertThat(rankings.maxSize()).isEqualTo(topN); - } - - @Test - public void shouldHaveDefaultConstructor() { - new Rankings(); - } - - @Test - public void defaultConstructorShouldSetPositiveTopN() { - // given/when - Rankings rankings = new Rankings(); - - // then - assertThat(rankings.maxSize()).isGreaterThan(0); - } - - @DataProvider - public Object[][] rankingsGrowData() { - return new Object[][]{ { 2, Lists.newArrayList(new RankableObjectWithFields("A", 1), new RankableObjectWithFields( - "B", 2), new RankableObjectWithFields("C", 3)) }, { 2, Lists.newArrayList(new RankableObjectWithFields("A", 1), - new RankableObjectWithFields("B", 2), new RankableObjectWithFields("C", 3), new RankableObjectWithFields("D", - 4)) } }; - } - - @Test(dataProvider = "rankingsGrowData") - public void sizeOfRankingsShouldNotGrowBeyondTopN(int topN, List rankables) { - // sanity check of the provided test data - assertThat(rankables.size()).overridingErrorMessage( - "The supplied test data is not correct: the number of rankables <%d> should be greater than <%d>", - rankables.size(), topN).isGreaterThan(topN); - - // given - Rankings rankings = new Rankings(topN); - - // when - for (Rankable r : rankables) { - rankings.updateWith(r); - } - - // then - assertThat(rankings.size()).isLessThanOrEqualTo(rankings.maxSize()); - } - - @DataProvider - public Object[][] simulatedRankingsData() { - return new Object[][]{ { Lists.newArrayList(A), Lists.newArrayList(A) }, { Lists.newArrayList(B, D, A, C), - Lists.newArrayList(D, C, B, A) }, { Lists.newArrayList(B, F, A, C, D, E), Lists.newArrayList(F, E, D, C, B, - A) }, { Lists.newArrayList(G, B, F, A, C, D, E, H), Lists.newArrayList(H, G, F, E, D, C, B, A) } }; - } - - @Test(dataProvider = "simulatedRankingsData") - public void shouldCorrectlyRankWhenUpdatedWithRankables(List unsorted, List expSorted) { - // given - Rankings rankings = new Rankings(unsorted.size()); - - // when - for (Rankable r : unsorted) { - rankings.updateWith(r); - } - - // then - assertThat(rankings.getRankings()).isEqualTo(expSorted); - } - - @Test(dataProvider = "simulatedRankingsData") - public void shouldCorrectlyRankWhenEmptyAndUpdatedWithOtherRankings(List unsorted, - List expSorted) { - // given - Rankings rankings = new Rankings(unsorted.size()); - Rankings otherRankings = new Rankings(rankings.maxSize()); - for (Rankable r : unsorted) { - otherRankings.updateWith(r); - } - - // when - rankings.updateWith(otherRankings); - - // then - assertThat(rankings.getRankings()).isEqualTo(expSorted); - } - - @Test(dataProvider = "simulatedRankingsData") - public void shouldCorrectlyRankWhenUpdatedWithEmptyOtherRankings(List unsorted, List expSorted) { - // given - Rankings rankings = new Rankings(unsorted.size()); - for (Rankable r : unsorted) { - rankings.updateWith(r); - } - Rankings emptyRankings = new Rankings(ANY_TOPN); - - // when - rankings.updateWith(emptyRankings); - - // then - assertThat(rankings.getRankings()).isEqualTo(expSorted); - } - - @DataProvider - public Object[][] simulatedRankingsAndOtherRankingsData() { - return new Object[][]{ { Lists.newArrayList(A), Lists.newArrayList(A), Lists.newArrayList(A) }, - { Lists.newArrayList(A, C), Lists.newArrayList(B, D), Lists.newArrayList(D, C, B, A) }, { Lists.newArrayList(B, - F, A), Lists.newArrayList(C, D, E), Lists.newArrayList(F, E, D, C, B, A) }, { Lists.newArrayList(G, B, F, A, C), - Lists.newArrayList(D, E, H), Lists.newArrayList(H, G, F, E, D, C, B, A) } }; - } - - @Test(dataProvider = 
"simulatedRankingsAndOtherRankingsData") - public void shouldCorrectlyRankWhenNotEmptyAndUpdatedWithOtherRankings(List unsorted, - List unsortedForOtherRankings, List expSorted) { - // given - Rankings rankings = new Rankings(expSorted.size()); - for (Rankable r : unsorted) { - rankings.updateWith(r); - } - Rankings otherRankings = new Rankings(unsortedForOtherRankings.size()); - for (Rankable r : unsortedForOtherRankings) { - otherRankings.updateWith(r); - } - - // when - rankings.updateWith(otherRankings); - - // then - assertThat(rankings.getRankings()).isEqualTo(expSorted); - } - - @DataProvider - public Object[][] duplicatesData() { - Rankable A1 = new RankableObjectWithFields("A", 1); - Rankable A2 = new RankableObjectWithFields("A", 2); - Rankable A3 = new RankableObjectWithFields("A", 3); - return new Object[][]{ { Lists.newArrayList(ANY_RANKABLE, ANY_RANKABLE, ANY_RANKABLE) }, { Lists.newArrayList(A1, - A2, A3) }, }; - } - - @Test(dataProvider = "duplicatesData") - public void shouldNotRankDuplicateObjectsMoreThanOnce(List duplicates) { - // given - Rankings rankings = new Rankings(duplicates.size()); - - // when - for (Rankable r : duplicates) { - rankings.updateWith(r); - } - - // then - assertThat(rankings.size()).isEqualTo(1); - } - - @DataProvider - public Object[][] removeZeroRankingsData() { - return new Object[][]{ { Lists.newArrayList(A, ZERO), Lists.newArrayList(A) }, { Lists.newArrayList(A), - Lists.newArrayList(A) }, { Lists.newArrayList(ZERO, A), Lists.newArrayList(A) }, { Lists.newArrayList(ZERO), - Lists.newArrayList() }, { Lists.newArrayList(ZERO, new RankableObjectWithFields("ZERO2", 0)), - Lists.newArrayList() }, { Lists.newArrayList(B, ZERO, new RankableObjectWithFields("ZERO2", 0), D, - new RankableObjectWithFields("ZERO3", 0), new RankableObjectWithFields("ZERO4", 0), C), Lists.newArrayList(D, C, - B) }, { Lists.newArrayList(A, ZERO, B), Lists.newArrayList(B, A) } }; - } - - @Test(dataProvider = "removeZeroRankingsData") - public void shouldRemoveZeroCounts(List unsorted, List expSorted) { - // given - Rankings rankings = new Rankings(unsorted.size()); - for (Rankable r : unsorted) { - rankings.updateWith(r); - } - - // when - rankings.pruneZeroCounts(); - - // then - assertThat(rankings.getRankings()).isEqualTo(expSorted); - } - - @Test - public void updatingWithNewRankablesShouldBeThreadSafe() throws InterruptedException { - // given - final List entries = ImmutableList.of(A, B, C, D); - final Rankings rankings = new Rankings(entries.size()); - - // We are capturing exceptions thrown in Blitzer's child threads into this data structure so that we can properly - // pass/fail this test. The reason is that Blitzer doesn't report exceptions, which is a known bug in Blitzer - // (JMOCK-263). See https://github.com/jmock-developers/jmock-library/issues/22 for more information. 
- final List exceptions = Lists.newArrayList(); - Blitzer blitzer = new Blitzer(1000); - - // when - blitzer.blitz(new Runnable() { - public void run() { - for (Rankable r : entries) { - try { - rankings.updateWith(r); - } - catch (RuntimeException e) { - synchronized(exceptions) { - exceptions.add(e); - } - } - } - } - }); - blitzer.shutdown(); - - // then - // - if (!exceptions.isEmpty()) { - for (Exception e : exceptions) { - System.err.println(Throwables.getStackTraceAsString(e)); - } - } - assertThat(exceptions).isEmpty(); - } - - @Test(dataProvider = "copyRankingsData") - public void copyShouldReturnCopy(int topN, List rankables) { - // given - Rankings rankings = new Rankings(topN); - for (Rankable r : rankables) { - rankings.updateWith(r); - } - - // when - Rankings copy = rankings.copy(); - - // then - assertThat(copy.maxSize()).isEqualTo(rankings.maxSize()); - assertThat(copy.getRankings()).isEqualTo(rankings.getRankings()); - } - - @Test(dataProvider = "defensiveCopyRankingsData") - public void copyShouldReturnDefensiveCopy(int topN, List rankables, List changes) { - // given - Rankings original = new Rankings(topN); - for (Rankable r : rankables) { - original.updateWith(r); - } - int expSize = original.size(); - List expRankings = original.getRankings(); - - // when - Rankings copy = original.copy(); - for (Rankable r : changes) { - copy.updateWith(r); - } - copy.pruneZeroCounts(); - - // then - assertThat(original.size()).isEqualTo(expSize); - assertThat(original.getRankings()).isEqualTo(expRankings); - } - -} \ No newline at end of file diff --git a/examples/storm-starter/test/jvm/storm/starter/tools/SlidingWindowCounterTest.java b/examples/storm-starter/test/jvm/storm/starter/tools/SlidingWindowCounterTest.java deleted file mode 100644 index 920bf017e83..00000000000 --- a/examples/storm-starter/test/jvm/storm/starter/tools/SlidingWindowCounterTest.java +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package storm.starter.tools; - -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; - -import java.util.Map; - -import static org.fest.assertions.api.Assertions.assertThat; - -public class SlidingWindowCounterTest { - - private static final int ANY_WINDOW_LENGTH_IN_SLOTS = 2; - private static final Object ANY_OBJECT = "ANY_OBJECT"; - - @DataProvider - public Object[][] illegalWindowLengths() { - return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 }, { 1 } }; - } - - @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalWindowLengths") - public void lessThanTwoSlotsShouldThrowIAE(int windowLengthInSlots) { - new SlidingWindowCounter(windowLengthInSlots); - } - - @DataProvider - public Object[][] legalWindowLengths() { - return new Object[][]{ { 2 }, { 3 }, { 20 } }; - } - - @Test(dataProvider = "legalWindowLengths") - public void twoOrMoreSlotsShouldBeValid(int windowLengthInSlots) { - new SlidingWindowCounter(windowLengthInSlots); - } - - @Test - public void newInstanceShouldHaveEmptyCounts() { - // given - SlidingWindowCounter counter = new SlidingWindowCounter(ANY_WINDOW_LENGTH_IN_SLOTS); - - // when - Map counts = counter.getCountsThenAdvanceWindow(); - - // then - assertThat(counts).isEmpty(); - } - - @DataProvider - public Object[][] simulatedCounterIterations() { - return new Object[][]{ { 2, new int[]{ 3, 2, 0, 0, 1, 0, 0, 0 }, new long[]{ 3, 5, 2, 0, 1, 1, 0, 0 } }, - { 3, new int[]{ 3, 2, 0, 0, 1, 0, 0, 0 }, new long[]{ 3, 5, 5, 2, 1, 1, 1, 0 } }, - { 4, new int[]{ 3, 2, 0, 0, 1, 0, 0, 0 }, new long[]{ 3, 5, 5, 5, 3, 1, 1, 1 } }, - { 5, new int[]{ 3, 2, 0, 0, 1, 0, 0, 0 }, new long[]{ 3, 5, 5, 5, 6, 3, 1, 1 } }, - { 5, new int[]{ 3, 11, 5, 13, 7, 17, 0, 3, 50, 600, 7000 }, - new long[]{ 3, 14, 19, 32, 39, 53, 42, 40, 77, 670, 7653 } }, }; - } - - @Test(dataProvider = "simulatedCounterIterations") - public void testCounterWithSimulatedRuns(int windowLengthInSlots, int[] incrementsPerIteration, - long[] expCountsPerIteration) { - // given - SlidingWindowCounter counter = new SlidingWindowCounter(windowLengthInSlots); - int numIterations = incrementsPerIteration.length; - - for (int i = 0; i < numIterations; i++) { - int numIncrements = incrementsPerIteration[i]; - long expCounts = expCountsPerIteration[i]; - // Objects are absent if they were zero both this iteration - // and the last -- if only this one, we need to report zero. - boolean expAbsent = ((expCounts == 0) && ((i == 0) || (expCountsPerIteration[i - 1] == 0))); - - // given (for this iteration) - for (int j = 0; j < numIncrements; j++) { - counter.incrementCount(ANY_OBJECT); - } - - // when (for this iteration) - Map counts = counter.getCountsThenAdvanceWindow(); - - // then (for this iteration) - if (expAbsent) { - assertThat(counts).doesNotContainKey(ANY_OBJECT); - } - else { - assertThat(counts.get(ANY_OBJECT)).isEqualTo(expCounts); - } - } - } - -} diff --git a/examples/storm-starter/test/jvm/storm/starter/tools/SlotBasedCounterTest.java b/examples/storm-starter/test/jvm/storm/starter/tools/SlotBasedCounterTest.java deleted file mode 100644 index 3ad042beb86..00000000000 --- a/examples/storm-starter/test/jvm/storm/starter/tools/SlotBasedCounterTest.java +++ /dev/null @@ -1,181 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package storm.starter.tools; - -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; - -import java.util.Map; - -import static org.fest.assertions.api.Assertions.assertThat; - -public class SlotBasedCounterTest { - - private static final int ANY_NUM_SLOTS = 1; - private static final int ANY_SLOT = 0; - private static final Object ANY_OBJECT = "ANY_OBJECT"; - - @DataProvider - public Object[][] illegalNumSlotsData() { - return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } }; - } - - @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalNumSlotsData") - public void negativeOrZeroNumSlotsShouldThrowIAE(int numSlots) { - new SlotBasedCounter(numSlots); - } - - @DataProvider - public Object[][] legalNumSlotsData() { - return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } }; - } - - @Test(dataProvider = "legalNumSlotsData") - public void positiveNumSlotsShouldBeOk(int numSlots) { - new SlotBasedCounter(numSlots); - } - - @Test - public void newInstanceShouldHaveEmptyCounts() { - // given - SlotBasedCounter counter = new SlotBasedCounter(ANY_NUM_SLOTS); - - // when - Map counts = counter.getCounts(); - - // then - assertThat(counts).isEmpty(); - } - - @Test - public void shouldReturnNonEmptyCountsWhenAtLeastOneObjectWasCounted() { - // given - SlotBasedCounter counter = new SlotBasedCounter(ANY_NUM_SLOTS); - counter.incrementCount(ANY_OBJECT, ANY_SLOT); - - // when - Map counts = counter.getCounts(); - - // then - assertThat(counts).isNotEmpty(); - - // additional tests that go beyond what this test is primarily about - assertThat(counts.size()).isEqualTo(1); - assertThat(counts.get(ANY_OBJECT)).isEqualTo(1); - } - - @DataProvider - public Object[][] incrementCountData() { - return new Object[][]{ { new String[]{ "foo", "bar" }, new int[]{ 3, 2 } } }; - } - - @Test(dataProvider = "incrementCountData") - public void shouldIncrementCount(Object[] objects, int[] expCounts) { - // given - SlotBasedCounter counter = new SlotBasedCounter(ANY_NUM_SLOTS); - - // when - for (int i = 0; i < objects.length; i++) { - Object obj = objects[i]; - int numIncrements = expCounts[i]; - for (int j = 0; j < numIncrements; j++) { - counter.incrementCount(obj, ANY_SLOT); - } - } - - // then - for (int i = 0; i < objects.length; i++) { - assertThat(counter.getCount(objects[i], ANY_SLOT)).isEqualTo(expCounts[i]); - } - assertThat(counter.getCount("nonexistentObject", ANY_SLOT)).isEqualTo(0); - } - - @Test - public void shouldReturnZeroForNonexistentObject() { - // given - SlotBasedCounter counter = new SlotBasedCounter(ANY_NUM_SLOTS); - - // when - counter.incrementCount("somethingElse", ANY_SLOT); - - // then - assertThat(counter.getCount("nonexistentObject", ANY_SLOT)).isEqualTo(0); - } - - @Test - public void shouldIncrementCountOnlyOneSlotAtATime() { - // given - int numSlots = 3; - Object obj = Long.valueOf(10); - SlotBasedCounter counter = new SlotBasedCounter(numSlots); - 
-        // when (empty)
-        // then
-        assertThat(counter.getCount(obj, 0)).isEqualTo(0);
-        assertThat(counter.getCount(obj, 1)).isEqualTo(0);
-        assertThat(counter.getCount(obj, 2)).isEqualTo(0);
-
-        // when
-        counter.incrementCount(obj, 1);
-
-        // then
-        assertThat(counter.getCount(obj, 0)).isEqualTo(0);
-        assertThat(counter.getCount(obj, 1)).isEqualTo(1);
-        assertThat(counter.getCount(obj, 2)).isEqualTo(0);
-    }
-
-    @Test
-    public void wipeSlotShouldSetAllCountsInSlotToZero() {
-        // given
-        SlotBasedCounter counter = new SlotBasedCounter(ANY_NUM_SLOTS);
-        Object countWasOne = "countWasOne";
-        Object countWasThree = "countWasThree";
-        counter.incrementCount(countWasOne, ANY_SLOT);
-        counter.incrementCount(countWasThree, ANY_SLOT);
-        counter.incrementCount(countWasThree, ANY_SLOT);
-        counter.incrementCount(countWasThree, ANY_SLOT);
-
-        // when
-        counter.wipeSlot(ANY_SLOT);
-
-        // then
-        assertThat(counter.getCount(countWasOne, ANY_SLOT)).isEqualTo(0);
-        assertThat(counter.getCount(countWasThree, ANY_SLOT)).isEqualTo(0);
-    }
-
-    @Test
-    public void wipeZerosShouldRemoveAnyObjectsWithZeroTotalCount() {
-        // given
-        SlotBasedCounter counter = new SlotBasedCounter(2);
-        int wipeSlot = 0;
-        int otherSlot = 1;
-        Object willBeRemoved = "willBeRemoved";
-        Object willContinueToBeTracked = "willContinueToBeTracked";
-        counter.incrementCount(willBeRemoved, wipeSlot);
-        counter.incrementCount(willContinueToBeTracked, wipeSlot);
-        counter.incrementCount(willContinueToBeTracked, otherSlot);
-
-        // when
-        counter.wipeSlot(wipeSlot);
-        counter.wipeZeros();
-
-        // then
-        assertThat(counter.getCounts()).doesNotContainKey(willBeRemoved);
-        assertThat(counter.getCounts()).containsKey(willContinueToBeTracked);
-    }
-}
diff --git a/external/README.md b/external/README.md
new file mode 100644
index 00000000000..d99cc4fea9b
--- /dev/null
+++ b/external/README.md
@@ -0,0 +1,20 @@
+# About Storm External Modules
+
+## What is "external"?
+
+"external" is a group of modules that, while not required for Storm to operate, are useful for extending Storm with
+additional functionality, such as integration with other technologies frequently used in combination with Storm.
+
+External modules are released in tandem with Storm in order to maintain version compatibility.
+
+Each external module is assigned one or more "Committer Sponsors" who have expressed interest in supporting the module.
+
+## What is a Committer Sponsor?
+
+A Committer Sponsor is simply an Apache Storm Committer who has expressed interest in supporting a module in one way or
+another. For any given module, we hope to have at least one sponsor to provide some level of protection against code rot
+and abandonware.
+
+Note that this in no way implies that Committer Sponsors have any special roles, privileges, or obligations. The Apache
+Storm Committers have equal authority over and responsibility for the entire codebase. A Committer Sponsor is
+essentially just a Committer who has said: "I'm interested in this module, and am willing to help where I can."
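For context on how the auto-credential plugins introduced below are wired in: a topology opts in through the
`topology.auto-credentials` setting (`Config.TOPOLOGY_AUTO_CREDENTIALS`). A minimal sketch of a submission, assuming a
Kerberos-enabled cluster, the storm-autocreds jar on the classpath, and hypothetical topology contents and name:

    import java.util.Collections;
    import org.apache.storm.Config;
    import org.apache.storm.StormSubmitter;
    import org.apache.storm.topology.TopologyBuilder;

    public class AutoCredsTopologySketch {
        public static void main(String[] args) throws Exception {
            TopologyBuilder builder = new TopologyBuilder();
            // ... set spouts and bolts here (elided; a real topology is required) ...

            Config conf = new Config();
            // Ask Nimbus to fetch HBase delegation tokens via the AutoHBase plugin
            // added below and push them to this topology's workers.
            conf.put(Config.TOPOLOGY_AUTO_CREDENTIALS,
                    Collections.singletonList("org.apache.storm.hbase.security.AutoHBase"));
            StormSubmitter.submitTopology("autocreds-demo", conf, builder.createTopology());
        }
    }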
diff --git a/external/storm-autocreds/pom.xml b/external/storm-autocreds/pom.xml
new file mode 100644
index 00000000000..22b9f26dbe5
--- /dev/null
+++ b/external/storm-autocreds/pom.xml
@@ -0,0 +1,124 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0.
+     See http://www.apache.org/licenses/LICENSE-2.0 -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>storm</artifactId>
+        <groupId>org.apache.storm</groupId>
+        <version>2.8.4-SNAPSHOT</version>
+        <relativePath>../../pom.xml</relativePath>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>storm-autocreds</artifactId>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-client</artifactId>
+            <version>${project.version}</version>
+            <scope>${provided.scope}</scope>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>log4j-over-slf4j</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-auth</artifactId>
+            <exclusions>
+                <exclusion>
+                    <groupId>ch.qos.reload4j</groupId>
+                    <artifactId>reload4j</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-reload4j</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.apache.zookeeper</groupId>
+                    <artifactId>zookeeper</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.apache.curator</groupId>
+                    <artifactId>apache-curator</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.apache.curator</groupId>
+                    <artifactId>curator-framework</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.apache.kerby</groupId>
+                    <artifactId>kerb-admin</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hbase</groupId>
+            <artifactId>hbase-client</artifactId>
+            <version>${hbase.version}</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>log4j</groupId>
+                    <artifactId>log4j</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-log4j12</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.apache.zookeeper</groupId>
+                    <artifactId>zookeeper</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>junit</groupId>
+                    <artifactId>junit</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.bouncycastle</groupId>
+                    <artifactId>bcpkix-jdk15on</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.bouncycastle</groupId>
+                    <artifactId>bcprov-jdk15on</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>javax.xml.bind</groupId>
+            <artifactId>jaxb-api</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-checkstyle-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-pmd-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/external/storm-autocreds/src/main/java/org/apache/storm/common/AbstractHadoopAutoCreds.java b/external/storm-autocreds/src/main/java/org/apache/storm/common/AbstractHadoopAutoCreds.java
new file mode 100644
index 00000000000..80aef42dfed
--- /dev/null
+++ b/external/storm-autocreds/src/main/java/org/apache/storm/common/AbstractHadoopAutoCreds.java
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.common;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import javax.security.auth.Subject;
+import javax.xml.bind.DatatypeConverter;
+
+import org.apache.commons.math3.util.Pair;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.storm.security.auth.IAutoCredentials;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The base class for auto credential plugins that abstracts out some of the common functionality.
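+ * Subclasses supply {@code doPrepare} and {@code getConfigKeyString}; this base class decodes the
+ * base64-encoded Hadoop {@code Credentials} stored in the topology credentials map and adds the
+ * delegation tokens they contain to the worker's {@code UserGroupInformation}.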
+ */
+public abstract class AbstractHadoopAutoCreds implements IAutoCredentials, CredentialKeyProvider {
+    private static final Logger LOG = LoggerFactory.getLogger(AbstractHadoopAutoCreds.class);
+
+    private Set<String> configKeys = new HashSet<>();
+
+    @Override
+    public void prepare(Map<String, Object> topoConf) {
+        doPrepare(topoConf);
+        loadConfigKeys(topoConf);
+    }
+
+    @Override
+    public void populateCredentials(Map<String, String> credentials) {
+        credentials.put(getCredentialKey(""),
+                DatatypeConverter.printBase64Binary("dummy place holder".getBytes()));
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public void populateSubject(Subject subject, Map<String, String> credentials) {
+        addCredentialToSubject(subject, credentials);
+        addTokensToUgi(subject);
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public void updateSubject(Subject subject, Map<String, String> credentials) {
+        addCredentialToSubject(subject, credentials);
+        addTokensToUgi(subject);
+    }
+
+    public Set<Pair<String, Credentials>> getCredentials(Map<String, String> credentials) {
+        return HadoopCredentialUtil.getCredential(this, credentials, configKeys);
+    }
+
+    /**
+     * Prepare the plugin.
+     *
+     * @param topoConf the topology conf
+     */
+    protected abstract void doPrepare(Map<String, Object> topoConf);
+
+    /**
+     * The lookup key for the config key string.
+     *
+     * @return the config key string
+     */
+    protected abstract String getConfigKeyString();
+
+    @SuppressWarnings("unchecked")
+    private void addCredentialToSubject(Subject subject, Map<String, String> credentials) {
+        try {
+            for (Pair<String, Credentials> cred : getCredentials(credentials)) {
+                subject.getPrivateCredentials().add(cred.getSecond());
+                LOG.info("Credentials added to the subject.");
+            }
+        } catch (Exception e) {
+            LOG.error("Failed to initialize and get UserGroupInformation.", e);
+        }
+    }
+
+    private void addTokensToUgi(Subject subject) {
+        if (subject != null) {
+            Set<Credentials> privateCredentials = subject.getPrivateCredentials(Credentials.class);
+            if (privateCredentials != null) {
+                for (Credentials cred : privateCredentials) {
+                    Collection<Token<? extends TokenIdentifier>> allTokens = cred.getAllTokens();
+                    if (allTokens != null) {
+                        for (Token<? extends TokenIdentifier> token : allTokens) {
+                            try {
+                                if (token == null) {
+                                    LOG.debug("Ignoring null token");
+                                    continue;
+                                }
+
+                                LOG.debug("Current user: {}", UserGroupInformation.getCurrentUser());
+                                LOG.debug("Token from Credentials : {}", token);
+
+                                TokenIdentifier tokenId = token.decodeIdentifier();
+                                if (tokenId != null) {
+                                    LOG.debug("Token identifier : {}", tokenId);
+                                    LOG.debug("Username in token identifier : {}", tokenId.getUser());
+                                }
+
+                                UserGroupInformation.getCurrentUser().addToken(token);
+                                LOG.info("Added delegation tokens to UGI.");
+                            } catch (IOException e) {
+                                LOG.error("Exception while trying to add tokens to ugi", e);
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    private void loadConfigKeys(Map<String, Object> conf) {
+        List<String> keys;
+        String configKeyString = getConfigKeyString();
+        if ((keys = (List<String>) conf.get(configKeyString)) != null) {
+            configKeys.addAll(keys);
+        }
+    }
+
+}
\ No newline at end of file
diff --git a/external/storm-autocreds/src/main/java/org/apache/storm/common/AbstractHadoopNimbusPluginAutoCreds.java b/external/storm-autocreds/src/main/java/org/apache/storm/common/AbstractHadoopNimbusPluginAutoCreds.java
new file mode 100644
index 00000000000..ad3bd17886f
--- /dev/null
+++ b/external/storm-autocreds/src/main/java/org/apache/storm/common/AbstractHadoopNimbusPluginAutoCreds.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.common;
+
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import javax.xml.bind.DatatypeConverter;
+
+import org.apache.commons.math3.util.Pair;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.storm.security.INimbusCredentialPlugin;
+import org.apache.storm.security.auth.ICredentialsRenewer;
+import org.apache.storm.utils.ConfigUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The base class for Nimbus-side auto credential plugins that abstracts out some of the common functionality.
+ */
+public abstract class AbstractHadoopNimbusPluginAutoCreds
+        implements INimbusCredentialPlugin, ICredentialsRenewer, CredentialKeyProvider {
+    private static final Logger LOG = LoggerFactory.getLogger(AbstractHadoopNimbusPluginAutoCreds.class);
+    public static final String CONFIG_KEY_RESOURCES = "resources";
+
+    @Override
+    public void prepare(Map<String, Object> conf) {
+        doPrepare(conf);
+    }
+
+    @Override
+    public void populateCredentials(Map<String, String> credentials,
+                                    Map<String, Object> topologyConf,
+                                    final String topologyOwnerPrincipal) {
+        try {
+            List<String> configKeys = getConfigKeys(topologyConf);
+            if (!configKeys.isEmpty()) {
+                for (String configKey : configKeys) {
+                    credentials.put(getCredentialKey(configKey),
+                            DatatypeConverter.printBase64Binary(
+                                    getHadoopCredentials(topologyConf, configKey, topologyOwnerPrincipal)));
+                }
+            } else {
+                credentials.put(getCredentialKey(""),
+                        DatatypeConverter.printBase64Binary(getHadoopCredentials(topologyConf, topologyOwnerPrincipal)));
+            }
+            LOG.info("Tokens added to credentials map.");
+        } catch (Exception e) {
+            LOG.error("Could not populate credentials.", e);
+        }
+    }
+
+    @Override
+    public void renew(Map<String, String> credentials, Map<String, Object> topologyConf, final String topologyOwnerPrincipal) {
+        doRenew(credentials, topologyConf, topologyOwnerPrincipal);
+    }
+
+    protected Set<Pair<String, Credentials>> getCredentials(Map<String, String> credentials,
+                                                            List<String> configKeys) {
+        return HadoopCredentialUtil.getCredential(this, credentials, configKeys);
+    }
+
+    protected void fillHadoopConfiguration(Map<String, Object> topologyConf, String configKey, Configuration configuration) {
+        Map<String, Object> config = (Map<String, Object>) topologyConf.get(configKey);
+        LOG.info("TopoConf {}, got config {}, for configKey {}", ConfigUtils.maskPasswords(topologyConf),
+                ConfigUtils.maskPasswords(config), configKey);
+        if (config != null) {
+            List<String> resourcesToLoad = new ArrayList<>();
+            for (Map.Entry<String, Object> entry : config.entrySet()) {
+                if (entry.getKey().equals(CONFIG_KEY_RESOURCES)) {
+                    resourcesToLoad.addAll((List<String>) entry.getValue());
+                } else {
+                    configuration.set(entry.getKey(), String.valueOf(entry.getValue()));
+                }
+            }
+            LOG.info("Resources to load {}", resourcesToLoad);
+            // add configs from resources like hdfs-site.xml
+            for (String pathStr : resourcesToLoad) {
+                configuration.addResource(new Path(Paths.get(pathStr).toUri()));
+            }
+        }
+        LOG.info("Initializing UGI with config {}", configuration);
+        UserGroupInformation.setConfiguration(configuration);
+    }
+
+    /**
+     * Prepare the plugin.
+     *
+     * @param conf the storm cluster conf set via storm.yaml
+     */
+    protected abstract void doPrepare(Map<String, Object> conf);
+
+    /**
+     * The lookup key for the config key string.
+     *
+     * @return the config key string
+     */
+    protected abstract String getConfigKeyString();
+
+    protected abstract byte[] getHadoopCredentials(Map<String, Object> topologyConf, String configKey, String topologyOwnerPrincipal);
+
+    protected abstract byte[] getHadoopCredentials(Map<String, Object> topologyConf, String topologyOwnerPrincipal);
+
+    protected abstract void doRenew(Map<String, String> credentials, Map<String, Object> topologyConf, String topologyOwnerPrincipal);
+
+    protected List<String> getConfigKeys(Map<String, Object> conf) {
+        String configKeyString = getConfigKeyString();
+        List<String> configKeys = (List<String>) conf.get(configKeyString);
+        return configKeys != null ? configKeys : Collections.emptyList();
+    }
+
+}
diff --git a/external/storm-autocreds/src/main/java/org/apache/storm/common/CredentialKeyProvider.java b/external/storm-autocreds/src/main/java/org/apache/storm/common/CredentialKeyProvider.java
new file mode 100644
index 00000000000..a3503a54ee9
--- /dev/null
+++ b/external/storm-autocreds/src/main/java/org/apache/storm/common/CredentialKeyProvider.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.common;
+
+/**
+ * Provider interface for the credential key.
+ */
+public interface CredentialKeyProvider {
+    /**
+     * The lookup key for the config key string.
+     *
+     * @return the config key string
+     */
+    String getCredentialKey(String configKey);
+}
diff --git a/external/storm-autocreds/src/main/java/org/apache/storm/common/HadoopCredentialUtil.java b/external/storm-autocreds/src/main/java/org/apache/storm/common/HadoopCredentialUtil.java
new file mode 100644
index 00000000000..8e7c64bc2f1
--- /dev/null
+++ b/external/storm-autocreds/src/main/java/org/apache/storm/common/HadoopCredentialUtil.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.common;
+
+import java.io.ByteArrayInputStream;
+import java.io.ObjectInputStream;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import javax.xml.bind.DatatypeConverter;
+
+import org.apache.commons.math3.util.Pair;
+import org.apache.hadoop.security.Credentials;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility class for getting Hadoop credentials.
+ */
+final class HadoopCredentialUtil {
+    private static final Logger LOG = LoggerFactory.getLogger(HadoopCredentialUtil.class);
+
+    private HadoopCredentialUtil() {
+    }
+
+    static Set<Pair<String, Credentials>> getCredential(CredentialKeyProvider provider,
+                                                        Map<String, String> credentials,
+                                                        Collection<String> configKeys) {
+        Set<Pair<String, Credentials>> res = new HashSet<>();
+        if (!configKeys.isEmpty()) {
+            for (String configKey : configKeys) {
+                Credentials cred = doGetCredentials(provider, credentials, configKey);
+                if (cred != null) {
+                    res.add(new Pair<>(configKey, cred));
+                }
+            }
+        } else {
+            Credentials cred = doGetCredentials(provider, credentials, "");
+            if (cred != null) {
+                res.add(new Pair<>("", cred));
+            }
+        }
+        return res;
+    }
+
+    private static Credentials doGetCredentials(CredentialKeyProvider provider,
+                                                Map<String, String> credentials,
+                                                String configKey) {
+        Credentials credential = null;
+        String credentialKey = provider.getCredentialKey(configKey);
+        if (credentials != null && credentials.containsKey(credentialKey)) {
+            try {
+                // the map stores the serialized Credentials base64-encoded under the provider's key
+                byte[] credBytes = DatatypeConverter.parseBase64Binary(credentials.get(credentialKey));
+                ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(credBytes));
+
+                credential = new Credentials();
+                credential.readFields(in);
+            } catch (Exception e) {
+                LOG.error("Could not obtain credentials from credentials map.", e);
+            }
+        }
+        return credential;
+    }
+
+}
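The Nimbus-side plugins serialize a Hadoop `Credentials` object and store it base64-encoded as a plain string in the
credentials map; `HadoopCredentialUtil` reverses that on the worker side. A minimal standalone sketch of the same round
trip, using only calls that appear in the code above (the class name is hypothetical):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import javax.xml.bind.DatatypeConverter;
    import org.apache.hadoop.security.Credentials;

    public class CredentialsRoundTrip {
        public static void main(String[] args) throws Exception {
            // Encode: serialize a (here empty) Credentials object and base64 it,
            // as AbstractHadoopNimbusPluginAutoCreds does before populating the map.
            Credentials creds = new Credentials();
            ByteArrayOutputStream bao = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bao);
            creds.write(out);
            out.flush();
            String encoded = DatatypeConverter.printBase64Binary(bao.toByteArray());

            // Decode: parse the base64 map *value* back into Credentials, as
            // HadoopCredentialUtil.doGetCredentials does in the worker.
            byte[] credBytes = DatatypeConverter.parseBase64Binary(encoded);
            Credentials roundTripped = new Credentials();
            roundTripped.readFields(new ObjectInputStream(new ByteArrayInputStream(credBytes)));
            System.out.println("tokens: " + roundTripped.numberOfTokens());
        }
    }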
diff --git a/external/storm-autocreds/src/main/java/org/apache/storm/hbase/security/AutoHBase.java b/external/storm-autocreds/src/main/java/org/apache/storm/hbase/security/AutoHBase.java
new file mode 100644
index 00000000000..549f2d94f5f
--- /dev/null
+++ b/external/storm-autocreds/src/main/java/org/apache/storm/hbase/security/AutoHBase.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.hbase.security;
+
+import static org.apache.storm.hbase.security.HBaseSecurityUtil.HBASE_CREDENTIALS;
+
+import java.util.Map;
+
+import org.apache.storm.common.AbstractHadoopAutoCreds;
+
+/**
+ * Auto credentials plugin for HBase implementation. This class provides a way to automatically
+ * push credentials to a topology and to retrieve them in the worker.
+ */
+public class AutoHBase extends AbstractHadoopAutoCreds {
+    @Override
+    public void doPrepare(Map<String, Object> conf) {
+    }
+
+    @Override
+    protected String getConfigKeyString() {
+        return HBaseSecurityUtil.HBASE_CREDENTIALS_CONFIG_KEYS;
+    }
+
+    @Override
+    public String getCredentialKey(String configKey) {
+        return HBASE_CREDENTIALS + configKey;
+    }
+}
+
diff --git a/external/storm-autocreds/src/main/java/org/apache/storm/hbase/security/AutoHBaseCommand.java b/external/storm-autocreds/src/main/java/org/apache/storm/hbase/security/AutoHBaseCommand.java
new file mode 100644
index 00000000000..518f2130b03
--- /dev/null
+++ b/external/storm-autocreds/src/main/java/org/apache/storm/hbase/security/AutoHBaseCommand.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.hbase.security;
+
+import static org.apache.storm.hbase.security.HBaseSecurityUtil.HBASE_KEYTAB_FILE_KEY;
+import static org.apache.storm.hbase.security.HBaseSecurityUtil.HBASE_PRINCIPAL_KEY;
+
+import java.util.HashMap;
+import java.util.Map;
+import javax.security.auth.Subject;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Command tool of HBase credential renewer.
+ */
+public final class AutoHBaseCommand {
+    private static final Logger LOG = LoggerFactory.getLogger(AutoHBaseCommand.class);
+
+    private AutoHBaseCommand() {
+    }
+
+    @SuppressWarnings("unchecked")
+    public static void main(String[] args) throws Exception {
+        Map<String, Object> conf = new HashMap<>();
+        conf.put(HBASE_PRINCIPAL_KEY, args[1]); // hbase principal storm-hbase@WITZEND.COM
+        conf.put(HBASE_KEYTAB_FILE_KEY,
+                args[2]); // storm hbase keytab /etc/security/keytabs/storm-hbase.keytab
+
+        AutoHBase autoHBase = new AutoHBase();
+        autoHBase.prepare(conf);
+        AutoHBaseNimbus autoHBaseNimbus = new AutoHBaseNimbus();
+        autoHBaseNimbus.prepare(conf);
+
+        Map<String, String> creds = new HashMap<>();
+        autoHBaseNimbus.populateCredentials(creds, conf, args[0]); // with realm e.g. storm@WITZEND.COM
+        LOG.info("Got HBase credentials" + autoHBase.getCredentials(creds));
+
+        Subject s = new Subject();
+        autoHBase.populateSubject(s, creds);
+        LOG.info("Got a Subject " + s);
+
+        autoHBaseNimbus.renew(creds, conf, args[0]);
+        LOG.info("renewed credentials" + autoHBase.getCredentials(creds));
+    }
+}
diff --git a/external/storm-autocreds/src/main/java/org/apache/storm/hbase/security/AutoHBaseNimbus.java b/external/storm-autocreds/src/main/java/org/apache/storm/hbase/security/AutoHBaseNimbus.java
new file mode 100644
index 00000000000..95e312d20b5
--- /dev/null
+++ b/external/storm-autocreds/src/main/java/org/apache/storm/hbase/security/AutoHBaseNimbus.java
@@ -0,0 +1,141 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.hbase.security;
+
+import static org.apache.storm.hbase.security.HBaseSecurityUtil.HBASE_CREDENTIALS;
+import static org.apache.storm.hbase.security.HBaseSecurityUtil.HBASE_KEYTAB_FILE_KEY;
+import static org.apache.storm.hbase.security.HBaseSecurityUtil.HBASE_PRINCIPAL_KEY;
+
+import java.io.ByteArrayOutputStream;
+import java.io.ObjectOutputStream;
+import java.net.InetAddress;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.security.token.ClientTokenUtil;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.storm.common.AbstractHadoopNimbusPluginAutoCreds;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Auto credentials nimbus plugin for HBase implementation. This class automatically
+ * gets HBase delegation tokens and pushes them to the user's topology.
+ */
+public class AutoHBaseNimbus extends AbstractHadoopNimbusPluginAutoCreds {
+    private static final Logger LOG = LoggerFactory.getLogger(AutoHBaseNimbus.class);
+
+    @Override
+    public void doPrepare(Map<String, Object> conf) {
+        // we don't allow any cluster wide configuration
+    }
+
+    @Override
+    protected String getConfigKeyString() {
+        return HBaseSecurityUtil.HBASE_CREDENTIALS_CONFIG_KEYS;
+    }
+
+    @Override
+    public void shutdown() {
+        //no op.
+ } + + @Override + protected byte[] getHadoopCredentials(Map conf, String configKey, final String topologyOwnerPrincipal) { + Configuration configuration = getHadoopConfiguration(conf, configKey); + return getHadoopCredentials(conf, configuration, topologyOwnerPrincipal); + } + + @Override + protected byte[] getHadoopCredentials(Map conf, final String topologyOwnerPrincipal) { + return getHadoopCredentials(conf, HBaseConfiguration.create(), topologyOwnerPrincipal); + } + + @SuppressWarnings("unchecked") + protected byte[] getHadoopCredentials(Map conf, Configuration hbaseConf, final String topologySubmitterUser) { + try { + if (UserGroupInformation.isSecurityEnabled()) { + UserProvider provider = UserProvider.instantiate(hbaseConf); + provider.login(HBASE_KEYTAB_FILE_KEY, HBASE_PRINCIPAL_KEY, InetAddress.getLocalHost().getCanonicalHostName()); + + LOG.info("Logged into Hbase as principal = " + hbaseConf.get(HBASE_PRINCIPAL_KEY)); + + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + + final UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(topologySubmitterUser, ugi); + + User user = User.create(proxyUser); + + if (user.isHBaseSecurityEnabled(hbaseConf)) { + final Connection connection = ConnectionFactory.createConnection(hbaseConf, user); + ClientTokenUtil.obtainAndCacheToken(connection, user); + + LOG.info("Obtained HBase tokens, adding to user credentials."); + + Credentials credential = proxyUser.getCredentials(); + + for (Token tokenForLog : credential.getAllTokens()) { + LOG.debug("Obtained token info in credential: {} / {}", + tokenForLog.toString(), tokenForLog.decodeIdentifier().getUser()); + } + + ByteArrayOutputStream bao = new ByteArrayOutputStream(); + ObjectOutputStream out = new ObjectOutputStream(bao); + credential.write(out); + out.flush(); + out.close(); + return bao.toByteArray(); + } else { + throw new RuntimeException("Security is not enabled for HBase."); + } + } else { + throw new RuntimeException("Security is not enabled for Hadoop"); + } + } catch (Exception ex) { + throw new RuntimeException("Failed to get delegation tokens." , ex); + } + } + + private Configuration getHadoopConfiguration(Map topoConf, String configKey) { + Configuration configuration = HBaseConfiguration.create(); + fillHadoopConfiguration(topoConf, configKey, configuration); + return configuration; + } + + @Override + public void doRenew(Map credentials, Map topologyConf, final String topologySubmitterUser) { + //HBASE tokens are not renewable so we always have to get new ones. + populateCredentials(credentials, topologyConf, topologySubmitterUser); + } + + @Override + public String getCredentialKey(String configKey) { + return HBASE_CREDENTIALS + configKey; + } + +} + diff --git a/external/storm-autocreds/src/main/java/org/apache/storm/hbase/security/HBaseSecurityUtil.java b/external/storm-autocreds/src/main/java/org/apache/storm/hbase/security/HBaseSecurityUtil.java new file mode 100644 index 00000000000..1d8e5b865a8 --- /dev/null +++ b/external/storm-autocreds/src/main/java/org/apache/storm/hbase/security/HBaseSecurityUtil.java @@ -0,0 +1,86 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.hbase.security; + +import static org.apache.storm.Config.TOPOLOGY_AUTO_CREDENTIALS; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.security.UserProvider; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.storm.security.auth.kerberos.AutoTGT; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class provides util methods for the storm-hbase connector to communicate + * with secured HBase. + */ +public class HBaseSecurityUtil { + private static final Logger LOG = LoggerFactory.getLogger(HBaseSecurityUtil.class); + + public static final String STORM_KEYTAB_FILE_KEY = "storm.keytab.file"; + public static final String STORM_USER_NAME_KEY = "storm.kerberos.principal"; + public static final String HBASE_CREDENTIALS_CONFIG_KEYS = "hbaseCredentialsConfigKeys"; + + public static final String HBASE_CREDENTIALS = "HBASE_CREDENTIALS"; + public static final String HBASE_KEYTAB_FILE_KEY = "hbase.keytab.file"; + public static final String HBASE_PRINCIPAL_KEY = "hbase.kerberos.principal"; + + private static volatile UserProvider legacyProvider = null; + + private HBaseSecurityUtil() { + } + + public static UserProvider login(Map<String, Object> conf, Configuration hbaseConfig) throws IOException { + //Allowing keytab based login for backward compatibility. + if (UserGroupInformation.isSecurityEnabled()) { + List<String> autoCredentials = (List<String>) conf.get(TOPOLOGY_AUTO_CREDENTIALS); + if ((autoCredentials == null) + || (!autoCredentials.contains(AutoHBase.class.getName()) && !autoCredentials.contains(AutoTGT.class.getName()))) { + LOG.info("Logging in using keytab as neither AutoHBase nor AutoTGT is specified for " + TOPOLOGY_AUTO_CREDENTIALS); + //ensure that if a keytab is used, only one login per process is executed + if (legacyProvider == null) { + synchronized (HBaseSecurityUtil.class) { + if (legacyProvider == null) { + legacyProvider = UserProvider.instantiate(hbaseConfig); + String keytab = (String) conf.get(STORM_KEYTAB_FILE_KEY); + if (keytab != null) { + hbaseConfig.set(STORM_KEYTAB_FILE_KEY, keytab); + } + String userName = (String) conf.get(STORM_USER_NAME_KEY); + if (userName != null) { + hbaseConfig.set(STORM_USER_NAME_KEY, userName); + } + legacyProvider.login(STORM_KEYTAB_FILE_KEY, STORM_USER_NAME_KEY, + InetAddress.getLocalHost().getCanonicalHostName()); + } + } + } + } + return legacyProvider; + } else { + return null; + } + } +} diff --git a/external/storm-autocreds/src/main/java/org/apache/storm/hdfs/security/AutoHDFS.java b/external/storm-autocreds/src/main/java/org/apache/storm/hdfs/security/AutoHDFS.java new file mode 100644 index 00000000000..a5d1a03a335 --- /dev/null +++ b/external/storm-autocreds/src/main/java/org/apache/storm/hdfs/security/AutoHDFS.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.hdfs.security; + +import static org.apache.storm.hdfs.security.HdfsSecurityUtil.HDFS_CREDENTIALS; + +import java.util.Map; + +import org.apache.storm.common.AbstractHadoopAutoCreds; + +/** + * Auto credentials plugin for HDFS implementation. This class provides a way to automatically + * push credentials to a topology and to retrieve them in the worker. + */ +@SuppressWarnings("checkstyle:AbbreviationAsWordInName") +public class AutoHDFS extends AbstractHadoopAutoCreds { + @Override + public void doPrepare(Map conf) { + } + + @Override + protected String getConfigKeyString() { + return HdfsSecurityUtil.HDFS_CREDENTIALS_CONFIG_KEYS; + } + + @Override + public String getCredentialKey(String configKey) { + return HDFS_CREDENTIALS + configKey; + } +} + diff --git a/external/storm-autocreds/src/main/java/org/apache/storm/hdfs/security/AutoHDFSCommand.java b/external/storm-autocreds/src/main/java/org/apache/storm/hdfs/security/AutoHDFSCommand.java new file mode 100644 index 00000000000..b8e4396a930 --- /dev/null +++ b/external/storm-autocreds/src/main/java/org/apache/storm/hdfs/security/AutoHDFSCommand.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.hdfs.security; + +import static org.apache.storm.hdfs.security.HdfsSecurityUtil.STORM_KEYTAB_FILE_KEY; +import static org.apache.storm.hdfs.security.HdfsSecurityUtil.STORM_USER_NAME_KEY; + +import java.util.HashMap; +import java.util.Map; +import javax.security.auth.Subject; + +import org.apache.storm.Config; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Command tool of HDFS credential renewer. + */ +@SuppressWarnings("checkstyle:AbbreviationAsWordInName") +public final class AutoHDFSCommand { + private static final Logger LOG = LoggerFactory.getLogger(AutoHDFSCommand.class); + + private AutoHDFSCommand() { + } + + @SuppressWarnings("unchecked") + public static void main(String[] args) throws Exception { + Map conf = new HashMap<>(); + conf.put(STORM_USER_NAME_KEY, args[1]); //with realm e.g. 
hdfs@WITZEND.COM + conf.put(STORM_KEYTAB_FILE_KEY, args[2]); // /etc/security/keytabs/storm.keytab + + AutoHDFS autoHdfs = new AutoHDFS(); + autoHdfs.prepare(conf); + AutoHDFSNimbus autoHdfsNimbus = new AutoHDFSNimbus(); + autoHdfsNimbus.prepare(conf); + + Map<String, String> creds = new HashMap<>(); + autoHdfsNimbus.populateCredentials(creds, conf, args[0]); + LOG.info("Got HDFS credentials {}", autoHdfs.getCredentials(creds)); + + Subject s = new Subject(); + autoHdfs.populateSubject(s, creds); + LOG.info("Got a Subject {}", s); + + autoHdfsNimbus.renew(creds, conf, args[0]); + LOG.info("Renewed credentials {}", autoHdfs.getCredentials(creds)); + } + +} diff --git a/external/storm-autocreds/src/main/java/org/apache/storm/hdfs/security/AutoHDFSNimbus.java b/external/storm-autocreds/src/main/java/org/apache/storm/hdfs/security/AutoHDFSNimbus.java new file mode 100644 index 00000000000..87ab4294543 --- /dev/null +++ b/external/storm-autocreds/src/main/java/org/apache/storm/hdfs/security/AutoHDFSNimbus.java @@ -0,0 +1,193 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.hdfs.security; + +import static org.apache.storm.hdfs.security.HdfsSecurityUtil.HDFS_CREDENTIALS; +import static org.apache.storm.hdfs.security.HdfsSecurityUtil.STORM_KEYTAB_FILE_KEY; +import static org.apache.storm.hdfs.security.HdfsSecurityUtil.STORM_USER_NAME_KEY; +import static org.apache.storm.hdfs.security.HdfsSecurityUtil.TOPOLOGY_HDFS_URI; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectOutputStream; +import java.net.URI; +import java.security.PrivilegedAction; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import org.apache.commons.math3.util.Pair; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.storm.Config; +import org.apache.storm.common.AbstractHadoopNimbusPluginAutoCreds; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Auto credentials nimbus plugin for HDFS implementation. This class automatically + * obtains HDFS delegation tokens and pushes them to the user's topology.
+ */ +@SuppressWarnings("checkstyle:AbbreviationAsWordInName") +public class AutoHDFSNimbus extends AbstractHadoopNimbusPluginAutoCreds { + private static final Logger LOG = LoggerFactory.getLogger(AutoHDFSNimbus.class); + + private String hdfsKeyTab; + private String hdfsPrincipal; + + @Override + public void doPrepare(Map conf) { + if (conf.containsKey(STORM_KEYTAB_FILE_KEY) && conf.containsKey(STORM_USER_NAME_KEY)) { + hdfsKeyTab = (String) conf.get(STORM_KEYTAB_FILE_KEY); + hdfsPrincipal = (String) conf.get(STORM_USER_NAME_KEY); + } + } + + @Override + protected String getConfigKeyString() { + return HdfsSecurityUtil.HDFS_CREDENTIALS_CONFIG_KEYS; + } + + @Override + public void shutdown() { + //no op. + } + + @Override + protected byte[] getHadoopCredentials(Map conf, String configKey, final String topologyOwnerPrincipal) { + Configuration configuration = getHadoopConfiguration(conf, configKey); + return getHadoopCredentials(conf, configuration, topologyOwnerPrincipal); + } + + @Override + protected byte[] getHadoopCredentials(Map conf, final String topologyOwnerPrincipal) { + return getHadoopCredentials(conf, new Configuration(), topologyOwnerPrincipal); + } + + @SuppressWarnings("unchecked") + private byte[] getHadoopCredentials(Map conf, final Configuration configuration, final String topologySubmitterUser) { + try { + if (UserGroupInformation.isSecurityEnabled()) { + login(configuration); + + final URI nameNodeUri = conf.containsKey(TOPOLOGY_HDFS_URI) + ? new URI(conf.get(TOPOLOGY_HDFS_URI).toString()) + : FileSystem.getDefaultUri(configuration); + + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + + final UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(topologySubmitterUser, ugi); + + Credentials creds = (Credentials) proxyUser.doAs(new PrivilegedAction() { + @Override + public Object run() { + try { + FileSystem fileSystem = FileSystem.get(nameNodeUri, configuration); + Credentials credential = proxyUser.getCredentials(); + + if (configuration.get(STORM_USER_NAME_KEY) == null) { + configuration.set(STORM_USER_NAME_KEY, hdfsPrincipal); + } + + fileSystem.addDelegationTokens(configuration.get(STORM_USER_NAME_KEY), credential); + LOG.info("Delegation tokens acquired for user {}", topologySubmitterUser); + return credential; + } catch (IOException e) { + throw new RuntimeException(e); + } + } + }); + + + ByteArrayOutputStream bao = new ByteArrayOutputStream(); + ObjectOutputStream out = new ObjectOutputStream(bao); + + creds.write(out); + out.flush(); + out.close(); + + return bao.toByteArray(); + } else { + throw new RuntimeException("Security is not enabled for HDFS"); + } + } catch (Exception ex) { + throw new RuntimeException("Failed to get delegation tokens." 
, ex); + } + } + + private Configuration getHadoopConfiguration(Map<String, Object> topoConf, String configKey) { + Configuration configuration = new Configuration(); + fillHadoopConfiguration(topoConf, configKey, configuration); + return configuration; + } + + /** + * {@inheritDoc} + */ + @Override + public void doRenew(Map<String, String> credentials, Map<String, Object> topologyConf, final String topologyOwnerPrincipal) { + List<String> confKeys = getConfigKeys(topologyConf); + for (Pair<String, Credentials> cred : getCredentials(credentials, confKeys)) { + try { + Configuration configuration = getHadoopConfiguration(topologyConf, cred.getFirst()); + Collection<Token<? extends TokenIdentifier>> tokens = cred.getSecond().getAllTokens(); + + if (tokens != null && !tokens.isEmpty()) { + for (Token<? extends TokenIdentifier> token : tokens) { + //We need to re-login: some other thread might have logged into hadoop using + // its credentials (e.g. AutoHBase might also be part of nimbus auto creds) + login(configuration); + long expiration = token.renew(configuration); + LOG.info("HDFS delegation token renewed, new expiration time {}", expiration); + } + } else { + LOG.debug("No tokens found for credentials, skipping renewal."); + } + } catch (Exception e) { + LOG.warn("Could not renew the credentials; one possible reason is that the tokens are beyond " + + "their renewal period, so attempting to get new tokens.", + e); + populateCredentials(credentials, topologyConf, topologyOwnerPrincipal); + } + } + } + + private void login(Configuration configuration) throws IOException { + if (configuration.get(STORM_KEYTAB_FILE_KEY) == null) { + configuration.set(STORM_KEYTAB_FILE_KEY, hdfsKeyTab); + } + if (configuration.get(STORM_USER_NAME_KEY) == null) { + configuration.set(STORM_USER_NAME_KEY, hdfsPrincipal); + } + SecurityUtil.login(configuration, STORM_KEYTAB_FILE_KEY, STORM_USER_NAME_KEY); + + LOG.info("Logged into hdfs with principal {}", configuration.get(STORM_USER_NAME_KEY)); + } + + @Override + public String getCredentialKey(String configKey) { + return HDFS_CREDENTIALS + configKey; + } +} + diff --git a/external/storm-autocreds/src/main/java/org/apache/storm/hdfs/security/HdfsSecurityUtil.java b/external/storm-autocreds/src/main/java/org/apache/storm/hdfs/security/HdfsSecurityUtil.java new file mode 100644 index 00000000000..0f8ef9b4ef5 --- /dev/null +++ b/external/storm-autocreds/src/main/java/org/apache/storm/hdfs/security/HdfsSecurityUtil.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.storm.hdfs.security; + +import static org.apache.storm.Config.TOPOLOGY_AUTO_CREDENTIALS; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; + +import org.apache.storm.security.auth.kerberos.AutoTGT; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class provides util methods for storm-hdfs connector communicating + * with secured HDFS. + */ +public final class HdfsSecurityUtil { + public static final String STORM_KEYTAB_FILE_KEY = "hdfs.keytab.file"; + public static final String STORM_USER_NAME_KEY = "hdfs.kerberos.principal"; + public static final String HDFS_CREDENTIALS_CONFIG_KEYS = "hdfsCredentialsConfigKeys"; + public static final String HDFS_CREDENTIALS = "HDFS_CREDENTIALS"; + public static final String TOPOLOGY_HDFS_URI = "topology.hdfs.uri"; + + private static final Logger LOG = LoggerFactory.getLogger(HdfsSecurityUtil.class); + private static AtomicBoolean isLoggedIn = new AtomicBoolean(); + + private HdfsSecurityUtil() { + } + + public static void login(Map conf, Configuration hdfsConfig) throws IOException { + //If AutoHDFS is specified, do not attempt to login using keytabs, only kept for backward compatibility. + if (conf.get(TOPOLOGY_AUTO_CREDENTIALS) == null + || (!(((List) conf.get(TOPOLOGY_AUTO_CREDENTIALS)).contains(AutoHDFS.class.getName())) + && !(((List) conf.get(TOPOLOGY_AUTO_CREDENTIALS)).contains(AutoTGT.class.getName())))) { + if (UserGroupInformation.isSecurityEnabled()) { + // compareAndSet added because of https://issues.apache.org/jira/browse/STORM-1535 + if (isLoggedIn.compareAndSet(false, true)) { + LOG.info("Logging in using keytab as AutoHDFS is not specified for " + TOPOLOGY_AUTO_CREDENTIALS); + String keytab = (String) conf.get(STORM_KEYTAB_FILE_KEY); + if (keytab != null) { + hdfsConfig.set(STORM_KEYTAB_FILE_KEY, keytab); + } + String userName = (String) conf.get(STORM_USER_NAME_KEY); + if (userName != null) { + hdfsConfig.set(STORM_USER_NAME_KEY, userName); + } + SecurityUtil.login(hdfsConfig, STORM_KEYTAB_FILE_KEY, STORM_USER_NAME_KEY); + } + } + } + } +} \ No newline at end of file diff --git a/external/storm-blobstore-migration/Makefile b/external/storm-blobstore-migration/Makefile new file mode 100644 index 00000000000..b05a432e69e --- /dev/null +++ b/external/storm-blobstore-migration/Makefile @@ -0,0 +1,35 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +PACKAGE_NAME=blobstore-migrator.tgz + +VERSION=$(shell cat VERSION || mvn help:evaluate -Dexpression=project.version | grep -v '^\[') + +all: $(PACKAGE_NAME) + +$(PACKAGE_NAME) : VERSION target/blobstore-migrator-$(VERSION).jar + -@rm -Rf blobstore-migrator $(PACKAGE_NAME) + mkdir blobstore-migrator + cp target/blobstore-migrator-$(VERSION).jar blobstore-migrator/ + cp listHDFS.sh listLocal.sh migrate.sh VERSION blobstore-migrator/ + tar -cvzf $(PACKAGE_NAME) blobstore-migrator + rm -Rf blobstore-migrator + +target/blobstore-migrator-$(VERSION).jar : + mvn clean install + +VERSION : + echo $(VERSION) >VERSION diff --git a/external/storm-blobstore-migration/README.md b/external/storm-blobstore-migration/README.md new file mode 100644 index 00000000000..28805a13095 --- /dev/null +++ b/external/storm-blobstore-migration/README.md @@ -0,0 +1,106 @@ +# Blobstore Migrator + +## Basic Use +----- + +### Build The Thing +Use make to build a tarball with everything needed. +``` +$ make +``` + +### Use The Thing +Copy and extract the tarball +``` +$ scp blobstore-migrator.tgz my-nimbus-host.example.com:~/ +$ ssh my-nimbus-host.example.com +... On my-nimbus-host ... +$ tar -xvzf blobstore-migrator.tgz +``` + +This will expand into a blobstore-migrator directory with all the scripts and the jar. +``` +$ cd blobstore-migrator +$ ls +blobstore-migrator-2.0.jar listHDFS.sh listLocal.sh migrate.sh +``` + +To run, first create a config for the cluster. +The config must be named 'config'. +It must contain definitions for `HDFS_BLOBSTORE_DIR`, `LOCAL_BLOBSTORE_DIR`, and `HADOOP_CLASSPATH`. +Hadoop jars are packaged with neither storm nor this package, so they must be installed separately. + +Optional configs used to configure security are: `BLOBSTORE_PRINCIPAL`, `KEYTAB_FILE`, and `JAAS_CONF` + +Example: +``` +$ cat config +HDFS_BLOBSTORE_DIR='hdfs://some-hdfs-namenode:8080/srv/storm/my-storm-blobstore' +LOCAL_BLOBSTORE_DIR='/srv/storm' +HADOOP_CLASSPATH='/hadoop/share/hdfs/*:/hadoop/common/*' + +# My security configs: +BLOBSTORE_PRINCIPAL='stormUser/my-nimbus-host.example.com@STORM.EXAMPLE.COM' +KEYTAB_FILE='/srv/my-keytab/stormUser.kt' +JAAS_CONF='/storm/conf/storm_jaas.conf' +``` + +Now you can run any of the scripts, all of which require config to exist: + - listHDFS.sh: Lists all blobs currently in the HDFS Blobstore + - listLocal.sh: Lists all blobs currently in the local Blobstore + - migrate.sh: Begins the migration process for Nimbus. (Read instructions below first) + + +#### Migrating +##### Nimbus +To migrate blobs from nimbus, the following steps are necessary: + +1. Shut down all Nimbus Instances +2. Backup storm config +3. Change the following settings in Nimbus' storm config (a sample sketch of these settings appears at the end of this section): + * blobstore.dir + * blobstore.hdfs.principal + * blobstore.hdfs.keytab + * blobstore.replication.factor + * nimbus.blobstore.class +4. Configure the server so that the environment variable `STORM_EXT_CLASSPATH` includes whatever `HADOOP_CLASSPATH` contains when `storm nimbus` is run. +5. Run the migrate.sh script on the master Nimbus. It will migrate the blobs from the LocalFsBlobStore to the HdfsBlobStore, and then exit. +6. Double check to make sure the storm configs look sane, and the blobs are where they should be. (listHDFS.sh, listLocal.sh) + +Once everything looks good, start the Nimbus Instances and the Nimbus BlobStore migration will be done. + +If something goes wrong during this process, restore the config that you backed up in step 2 and then start Nimbus. Nimbus will use the Local Blobstore as before.
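+
+A sample sketch of the step 3 settings as they might appear in Nimbus' storm.yaml. The values below are placeholders modeled on the config example above; the paths, principal, keytab, and replication factor are assumptions that must be adapted to your cluster:
+```
+blobstore.dir: "hdfs://some-hdfs-namenode:8080/srv/storm/my-storm-blobstore"
+blobstore.hdfs.principal: "stormUser/my-nimbus-host.example.com@STORM.EXAMPLE.COM"
+blobstore.hdfs.keytab: "/srv/my-keytab/stormUser.kt"
+blobstore.replication.factor: 3
+nimbus.blobstore.class: "org.apache.storm.hdfs.blobstore.HdfsBlobStore"
+```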
+ +##### Supervisors +Supervisors can be upgraded by performing the following steps: +1. Shut down the supervisor. +2. Put the following blobstore settings in place: + * blobstore.dir + * blobstore.hdfs.principal + * blobstore.hdfs.keytab + * blobstore.replication.factor + * supervisor.blobstore.class +3. Kill all remaining worker processes (this is ugly). +4. Wipe the local state. +5. Start the supervisor. + +The supervisor state gets a hard wipe because of spurious errors during supervisor migration that were only resolved by wiping out the local state. This may not be the best solution, but it does seem to work predictably. + +## License + +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. diff --git a/external/storm-blobstore-migration/config.sample b/external/storm-blobstore-migration/config.sample new file mode 100644 index 00000000000..feb9a96f54a --- /dev/null +++ b/external/storm-blobstore-migration/config.sample @@ -0,0 +1,24 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +HDFS_BLOBSTORE_DIR='hdfs://some-hdfs-namenode:8080/srv/storm/my-storm-blobstore' +LOCAL_BLOBSTORE_DIR='/srv/storm' +HADOOP_CLASSPATH='/hadoop-2.6.5/etc/hadoop/:/hadoop-2.6.5/share/hadoop/common/lib/*:/hadoop-2.6.5/share/hadoop/common/*:/hadoop-2.6.5/share/hadoop/hdfs:/hadoop-2.6.5/share/hadoop/hdfs/lib/*:/hadoop-2.6.5/share/hadoop/hdfs/*:/hadoop-2.6.5/share/hadoop/yarn/lib/*:/hadoop-2.6.5/share/hadoop/yarn/*:/hadoop-2.6.5/share/hadoop/mapreduce/lib/*:/hadoop-2.6.5/share/hadoop/mapreduce/*' + +## Optional security configs +BLOBSTORE_PRINCIPAL='stormUser/my-nimbus-host.example.com@STORM.EXAMPLE.COM' +KEYTAB_FILE='/srv/my-keytab/stormUser.kt' +JAAS_CONF='/storm/conf/storm_jaas.conf' \ No newline at end of file diff --git a/external/storm-blobstore-migration/listHDFS.sh b/external/storm-blobstore-migration/listHDFS.sh new file mode 100755 index 00000000000..9662a02de78 --- /dev/null +++ b/external/storm-blobstore-migration/listHDFS.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +. config +VERSION=`cat VERSION` +MIGRATION_JAR=blobstore-migrator-${VERSION}.jar + +if [ -n "$JAAS_CONF" ]; then + java -Djava.security.auth.login.config=$JAAS_CONF -cp $HADOOP_CLASSPATH:$MIGRATION_JAR org.apache.storm.blobstore.MigratorMain listHDFS $HDFS_BLOBSTORE_DIR $BLOBSTORE_PRINCIPAL $KEYTAB_FILE; +else + java -cp $HADOOP_CLASSPATH:$MIGRATION_JAR org.apache.storm.blobstore.MigratorMain listHDFS $HDFS_BLOBSTORE_DIR $BLOBSTORE_PRINCIPAL $KEYTAB_FILE; +fi diff --git a/external/storm-blobstore-migration/listLocal.sh b/external/storm-blobstore-migration/listLocal.sh new file mode 100755 index 00000000000..a8a8708ed4c --- /dev/null +++ b/external/storm-blobstore-migration/listLocal.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +. 
config +VERSION=`cat VERSION` +MIGRATION_JAR=blobstore-migrator-${VERSION}.jar + +if [ -n "$JAAS_CONF" ]; then + java -Djava.security.auth.login.config=$JAAS_CONF -cp $HADOOP_CLASSPATH:$MIGRATION_JAR org.apache.storm.blobstore.MigratorMain listLocalFs $LOCAL_BLOBSTORE_DIR +else + java -cp $HADOOP_CLASSPATH:$MIGRATION_JAR org.apache.storm.blobstore.MigratorMain listLocalFs $LOCAL_BLOBSTORE_DIR +fi diff --git a/external/storm-blobstore-migration/migrate.sh b/external/storm-blobstore-migration/migrate.sh new file mode 100755 index 00000000000..687af32bc44 --- /dev/null +++ b/external/storm-blobstore-migration/migrate.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +. config +VERSION=`cat VERSION` +MIGRATION_JAR=blobstore-migrator-${VERSION}.jar + +if [ -n "$JAAS_CONF" ]; then + java -Djava.security.auth.login.config=$JAAS_CONF -cp $HADOOP_CLASSPATH:$MIGRATION_JAR org.apache.storm.blobstore.MigratorMain migrate $LOCAL_BLOBSTORE_DIR $HDFS_BLOBSTORE_DIR $BLOBSTORE_PRINCIPAL $KEYTAB_FILE +else + java -cp $HADOOP_CLASSPATH:$MIGRATION_JAR org.apache.storm.blobstore.MigratorMain migrate $LOCAL_BLOBSTORE_DIR $HDFS_BLOBSTORE_DIR $BLOBSTORE_PRINCIPAL $KEYTAB_FILE +fi + +echo "Double check everything is correct, then start nimbus."
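Taken together, a typical session on the nimbus host might look like the sketch below. The ordering follows the README, and the confirmation prompt is the one printed by MigrateBlobs later in this patch; nothing in HDFS is deleted until that prompt is answered:

```
$ cd blobstore-migrator
$ ./listLocal.sh    # list blobs currently in LOCAL_BLOBSTORE_DIR
$ ./listHDFS.sh     # list blobs already in HDFS_BLOBSTORE_DIR
$ ./migrate.sh      # wipes HDFS blobs, then copies local blobs; asks "Continue? [Y/n]" first
```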
diff --git a/external/storm-blobstore-migration/pom.xml b/external/storm-blobstore-migration/pom.xml new file mode 100644 index 00000000000..fb622e1fab5 --- /dev/null +++ b/external/storm-blobstore-migration/pom.xml @@ -0,0 +1,158 @@ + + + + 4.0.0 + + + storm + org.apache.storm + 2.8.4-SNAPSHOT + ../../pom.xml + + + blobstore-migrator + jar + + blobstore-migrator + https://maven.apache.org + + + + org.apache.storm + storm-server + ${project.version} + + + + org.slf4j + log4j-over-slf4j + + + ch.qos.reload4j + reload4j + + + org.slf4j + slf4j-reload4j + + + + + + org.apache.storm + storm-hdfs-blobstore + ${project.version} + + + + org.slf4j + log4j-over-slf4j + + + + + org.apache.hadoop + hadoop-common + ${hadoop.version} + + + + * + * + + + + + com.fasterxml.woodstox + woodstox-core + + + commons-collections + commons-collections + + + org.apache.commons + commons-configuration2 + + + commons-codec + commons-codec + + + org.apache.avro + avro + + + com.google.guava + guava + + + + + + maven-compiler-plugin + + ${maven.compiler.source} + ${maven.compiler.target} + + + + org.apache.maven.plugins + maven-jar-plugin + + + + org.apache.storm.blobstore.MigratorMain + + + + + + org.apache.maven.plugins + maven-shade-plugin + + + package + + shade + + + + + + org.apache.storm.blobstore.MigratorMain + + + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + + org.apache.maven.plugins + maven-pmd-plugin + + + + diff --git a/external/storm-blobstore-migration/src/main/java/org/apache/storm/blobstore/ListHDFS.java b/external/storm-blobstore-migration/src/main/java/org/apache/storm/blobstore/ListHDFS.java new file mode 100644 index 00000000000..cfa71312c24 --- /dev/null +++ b/external/storm-blobstore-migration/src/main/java/org/apache/storm/blobstore/ListHDFS.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.storm.blobstore; + +import java.util.Map; + +import javax.security.auth.Subject; + +import org.apache.storm.Config; +import org.apache.storm.blobstore.ClientBlobStore; +import org.apache.storm.hdfs.blobstore.HdfsBlobStore; +import org.apache.storm.hdfs.blobstore.HdfsClientBlobStore; +import org.apache.storm.utils.Utils; + +@SuppressWarnings("checkstyle:AbbreviationAsWordInName") +public class ListHDFS { + + public static void main(String[] args) throws Exception { + if (args.length < 1) { + System.out.println("Need at least 1 argument (hdfs_blobstore_path), but have " + Integer.toString(args.length)); + System.out.println("listHDFS <hdfs_blobstore_path> [principal] [keytab]"); + System.out.println("Lists blobs in HdfsBlobStore"); + System.out.println("Example: listHDFS " + + "'hdfs://some-hdfs-namenode:8080/srv/storm/my-storm-blobstore' " + + "'stormUser/my-nimbus-host.example.com@STORM.EXAMPLE.COM' '/srv/my-keytab/stormUser.kt'"); + System.exit(1); + } + + Map<String, Object> hdfsConf = Utils.readStormConfig(); + String hdfsBlobstorePath = args[0]; + + hdfsConf.put(Config.BLOBSTORE_DIR, hdfsBlobstorePath); + hdfsConf.put(Config.STORM_PRINCIPAL_TO_LOCAL_PLUGIN, "org.apache.storm.security.auth.DefaultPrincipalToLocal"); + if (args.length >= 2) { + System.out.println("SETTING HDFS PRINCIPAL!"); + hdfsConf.put(Config.STORM_HDFS_LOGIN_PRINCIPAL, args[1]); + } + if (args.length >= 3) { + System.out.println("SETTING HDFS KEYTAB!"); + hdfsConf.put(Config.STORM_HDFS_LOGIN_KEYTAB, args[2]); + } + + /* CREATE THE BLOBSTORE */ + HdfsBlobStore hdfsBlobStore = new HdfsBlobStore(); + hdfsBlobStore.prepare(hdfsConf, null, null, null); + + /* LOOK AT HDFS BLOBSTORE */ + System.out.println("Listing HDFS blobstore keys."); + MigratorMain.listBlobStoreKeys(hdfsBlobStore, null); + System.out.println("Done Listing HDFS blobstore keys."); + + hdfsBlobStore.shutdown(); + } +} diff --git a/external/storm-blobstore-migration/src/main/java/org/apache/storm/blobstore/ListLocalFs.java b/external/storm-blobstore-migration/src/main/java/org/apache/storm/blobstore/ListLocalFs.java new file mode 100644 index 00000000000..aefddb8869b --- /dev/null +++ b/external/storm-blobstore-migration/src/main/java/org/apache/storm/blobstore/ListLocalFs.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.storm.blobstore; + +import java.util.Map; + +import javax.security.auth.Subject; + +import org.apache.storm.Config; +import org.apache.storm.blobstore.LocalFsBlobStore; +import org.apache.storm.nimbus.NimbusInfo; +import org.apache.storm.utils.Utils; + +public class ListLocalFs { + + public static void main(String[] args) throws Exception { + + if (args.length != 1) { + System.out.println("Need 1 argument, but have " + Integer.toString(args.length)); + System.out.println("listLocalFs <local_blobstore_dir>"); + System.out.println("Lists blobs in LocalFsBlobStore"); + System.out.println("Example: listLocalFs '/srv/storm'"); + System.exit(1); + } + + Map<String, Object> lfsConf = Utils.readStormConfig(); + lfsConf.put(Config.BLOBSTORE_DIR, args[0]); + lfsConf.put(Config.STORM_PRINCIPAL_TO_LOCAL_PLUGIN, "org.apache.storm.security.auth.DefaultPrincipalToLocal"); + + /* CREATE THE BLOBSTORE */ + LocalFsBlobStore lfsBlobStore = new LocalFsBlobStore(); + lfsBlobStore.prepare(lfsConf, null, NimbusInfo.fromConf(lfsConf), null); + + /* LOOK AT LOCAL BLOBSTORE */ + System.out.println("Listing Local blobstore keys."); + MigratorMain.listBlobStoreKeys(lfsBlobStore, null); + System.out.println("Done Listing Local blobstore keys."); + + lfsBlobStore.shutdown(); + } +} diff --git a/external/storm-blobstore-migration/src/main/java/org/apache/storm/blobstore/MigrateBlobs.java b/external/storm-blobstore-migration/src/main/java/org/apache/storm/blobstore/MigrateBlobs.java new file mode 100644 index 00000000000..e7a3581637d --- /dev/null +++ b/external/storm-blobstore-migration/src/main/java/org/apache/storm/blobstore/MigrateBlobs.java @@ -0,0 +1,145 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.storm.blobstore; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Map; + +import javax.security.auth.Subject; +import javax.security.auth.login.LoginContext; + +import org.apache.storm.Config; +import org.apache.storm.blobstore.BlobStore; +import org.apache.storm.blobstore.LocalFsBlobStore; +import org.apache.storm.generated.AuthorizationException; +import org.apache.storm.generated.KeyAlreadyExistsException; +import org.apache.storm.generated.KeyNotFoundException; +import org.apache.storm.generated.ReadableBlobMeta; +import org.apache.storm.generated.SettableBlobMeta; +import org.apache.storm.hdfs.blobstore.HdfsBlobStore; +import org.apache.storm.nimbus.NimbusInfo; +import org.apache.storm.utils.Utils; + +public class MigrateBlobs { + + protected static void deleteAllBlobStoreKeys(BlobStore bs, Subject who) throws AuthorizationException, KeyNotFoundException { + Iterable<String> hdfsKeys = () -> bs.listKeys(); + for (String key : hdfsKeys) { + System.out.println(key); + bs.deleteBlob(key, who); + } + } + + protected static void copyBlobStoreKeys(BlobStore bsFrom, + Subject whoFrom, + BlobStore bsTo, Subject whoTo) throws AuthorizationException, + KeyAlreadyExistsException, + IOException, + KeyNotFoundException { + Iterable<String> lfsKeys = () -> bsFrom.listKeys(); + for (String key : lfsKeys) { + ReadableBlobMeta readableMeta = bsFrom.getBlobMeta(key, whoFrom); + SettableBlobMeta meta = readableMeta.get_settable(); + InputStream in = bsFrom.getBlob(key, whoFrom); + System.out.println("COPYING BLOB " + key + " FROM " + bsFrom + " TO " + bsTo); + bsTo.createBlob(key, in, meta, whoTo); + System.out.println("DONE CREATING BLOB " + key); + } + } + + + public static void main(String[] args) throws Exception { + Map<String, Object> hdfsConf = Utils.readStormConfig(); + + if (args.length < 2) { + System.out.println("Need at least 2 arguments, but have " + Integer.toString(args.length)); + System.out.println("migrate <local_blobstore_dir> <hdfs_blobstore_path> [principal] [keytab]"); + System.out.println("Migrates blobs from LocalFsBlobStore to HdfsBlobStore"); + System.out.println("Example: migrate '/srv/storm' " + + "'hdfs://some-hdfs-namenode:8080/srv/storm/my-storm-blobstore' " + + "'stormUser/my-nimbus-host.example.com@STORM.EXAMPLE.COM' '/srv/my-keytab/stormUser.kt'"); + System.exit(1); + } + + String hdfsBlobstorePath = args[1]; + + hdfsConf.put(Config.BLOBSTORE_DIR, hdfsBlobstorePath); + hdfsConf.put(Config.STORM_PRINCIPAL_TO_LOCAL_PLUGIN, "org.apache.storm.security.auth.DefaultPrincipalToLocal"); + if (args.length >= 3) { + System.out.println("SETTING HDFS PRINCIPAL!"); + hdfsConf.put(Config.STORM_HDFS_LOGIN_PRINCIPAL, args[2]); + } + if (args.length >= 4) { + System.out.println("SETTING HDFS KEYTAB!"); + hdfsConf.put(Config.STORM_HDFS_LOGIN_KEYTAB, args[3]); + } + hdfsConf.put(Config.STORM_BLOBSTORE_REPLICATION_FACTOR, 7); + + Map<String, Object> lfsConf = Utils.readStormConfig(); + String localBlobstoreDir = args[0]; + lfsConf.put(Config.BLOBSTORE_DIR, localBlobstoreDir); + lfsConf.put(Config.STORM_PRINCIPAL_TO_LOCAL_PLUGIN, "org.apache.storm.security.auth.DefaultPrincipalToLocal"); + + + /* CREATE THE BLOBSTORES */ + LocalFsBlobStore lfsBlobStore = new LocalFsBlobStore(); + lfsBlobStore.prepare(lfsConf, null, NimbusInfo.fromConf(lfsConf), null); + + HdfsBlobStore hdfsBlobStore = new HdfsBlobStore(); + hdfsBlobStore.prepare(hdfsConf, null, null, null); + + + /* LOOK AT LOCAL BLOBSTORE */ + System.out.println("Listing local blobstore keys."); + MigratorMain.listBlobStoreKeys(lfsBlobStore, null); + System.out.println("Done listing local
blobstore keys."); + + /* LOOK AT HDFS BLOBSTORE */ + System.out.println("Listing HDFS blobstore keys."); + MigratorMain.listBlobStoreKeys(hdfsBlobStore, null); + System.out.println("Done listing HDFS blobstore keys."); + + + System.out.println("Going to delete everything in HDFS, then copy all local blobs to HDFS. Continue? [Y/n]"); + String resp = System.console().readLine().toLowerCase().trim(); + if (!(resp.equals("y") || resp.equals(""))) { + System.out.println("Not copying blobs. Exiting. [" + resp.toLowerCase().trim() + "]"); + System.exit(1); + } + + /* DELETE EVERYTHING IN HDFS */ + System.out.println("Deleting blobs from HDFS."); + deleteAllBlobStoreKeys(hdfsBlobStore, null); + System.out.println("DONE deleting blobs from HDFS."); + + /* COPY EVERYTHING FROM LOCAL BLOBSTORE TO HDFS */ + System.out.println("Copying local blobstore keys."); + copyBlobStoreKeys(lfsBlobStore, null, hdfsBlobStore, null); + System.out.println("DONE Copying local blobstore keys."); + + /* LOOK AT HDFS BLOBSTORE AGAIN */ + System.out.println("Listing HDFS blobstore keys."); + MigratorMain.listBlobStoreKeys(hdfsBlobStore, null); + System.out.println("Done listing HDFS blobstore keys."); + + hdfsBlobStore.shutdown(); + System.out.println("Done Migrating!"); + } +} diff --git a/external/storm-blobstore-migration/src/main/java/org/apache/storm/blobstore/MigratorMain.java b/external/storm-blobstore-migration/src/main/java/org/apache/storm/blobstore/MigratorMain.java new file mode 100644 index 00000000000..03d163aad27 --- /dev/null +++ b/external/storm-blobstore-migration/src/main/java/org/apache/storm/blobstore/MigratorMain.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.storm.blobstore; + +import java.util.Arrays; + +import javax.security.auth.Subject; + +public class MigratorMain { + + public static void listBlobStoreKeys(BlobStore bs, Subject who) { + Iterable<String> bsKeys = () -> bs.listKeys(); + for (String key : bsKeys) { + System.out.println(key); + } + } + + private static void usage() { + System.out.println("Commands:"); + System.out.println("\tlistHDFS"); + System.out.println("\tlistLocalFs"); + System.out.println("\tmigrate"); + } + + public static void main(String[] args) throws Exception { + if (args.length == 0) { + usage(); + System.exit(1); + } + + if (args[0].equals("listHDFS")) { + ListHDFS.main(Arrays.copyOfRange(args, 1, args.length)); + } else if (args[0].equals("listLocalFs")) { + ListLocalFs.main(Arrays.copyOfRange(args, 1, args.length)); + } else if (args[0].equals("migrate")) { + MigrateBlobs.main(Arrays.copyOfRange(args, 1, args.length)); + } else { + System.out.println("Not recognized: " + args[0]); + usage(); + } + } +} diff --git a/external/storm-hdfs-blobstore/pom.xml b/external/storm-hdfs-blobstore/pom.xml new file mode 100644 index 00000000000..f4e54fbd550 --- /dev/null +++ b/external/storm-hdfs-blobstore/pom.xml @@ -0,0 +1,164 @@ + + + + 4.0.0 + + + storm + org.apache.storm + 2.8.4-SNAPSHOT + ../../pom.xml + + + storm-hdfs-blobstore + + + + org.slf4j + slf4j-api + + + org.apache.storm + storm-client + ${project.version} + ${provided.scope} + + + org.apache.storm + storm-autocreds + ${project.version} + + + org.apache.hadoop + hadoop-common + + + org.apache.hadoop + hadoop-hdfs + + + org.apache.hadoop + hadoop-auth + + + + + org.apache.hadoop + hadoop-client-api + ${hadoop.version} + + + org.apache.hadoop + hadoop-client-runtime + ${hadoop.version} + + + com.google.guava + guava + + + commons-io + commons-io + + + org.apache.hadoop + hadoop-client-minicluster + ${hadoop.version} + test + + + org.eclipse.jetty.ee10 + jetty-ee10-servlet + ${jetty.version} + test + + + org.eclipse.jetty.ee10 + jetty-ee10-webapp + ${jetty.version} + test + + + org.junit.jupiter + junit-jupiter-params + test + + + org.mockito + mockito-core + test + + + org.hamcrest + hamcrest + test + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + false + 1 + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + maven-clean-plugin + + + cleanup + clean + + clean + + + true + + + ./build/ + + + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + + org.apache.maven.plugins + maven-pmd-plugin + + + + diff --git a/external/storm-hdfs-blobstore/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStore.java b/external/storm-hdfs-blobstore/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStore.java new file mode 100644 index 00000000000..7d957186a21 --- /dev/null +++ b/external/storm-hdfs-blobstore/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStore.java @@ -0,0 +1,421 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.hdfs.blobstore; + +import static org.apache.storm.blobstore.BlobStoreAclHandler.ADMIN; +import static org.apache.storm.blobstore.BlobStoreAclHandler.READ; +import static org.apache.storm.blobstore.BlobStoreAclHandler.WRITE; + +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import java.io.ByteArrayOutputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import javax.security.auth.Subject; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.storm.Config; +import org.apache.storm.blobstore.AtomicOutputStream; +import org.apache.storm.blobstore.BlobStore; +import org.apache.storm.blobstore.BlobStoreAclHandler; +import org.apache.storm.blobstore.BlobStoreFile; +import org.apache.storm.blobstore.InputStreamWithMeta; +import org.apache.storm.generated.AuthorizationException; +import org.apache.storm.generated.KeyAlreadyExistsException; +import org.apache.storm.generated.KeyNotFoundException; +import org.apache.storm.generated.ReadableBlobMeta; +import org.apache.storm.generated.SettableBlobMeta; +import org.apache.storm.nimbus.ILeaderElector; +import org.apache.storm.nimbus.NimbusInfo; +import org.apache.storm.utils.HadoopLoginUtil; +import org.apache.storm.utils.Utils; +import org.apache.storm.utils.WrappedKeyAlreadyExistsException; +import org.apache.storm.utils.WrappedKeyNotFoundException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Provides a HDFS file system backed blob store implementation. + * Note that this provides an api for having HDFS be the backing store for the blobstore, + * it is not a service/daemon. + * + *

<p>We currently have NIMBUS_ADMINS and SUPERVISOR_ADMINS configuration. NIMBUS_ADMINS are given READ, WRITE and ADMIN + * access whereas the SUPERVISOR_ADMINS are given READ access in order to read and download the blobs from the nimbus. + * + * <p>The ACLs for the blob store are validated against whether the subject is a NIMBUS_ADMIN, SUPERVISOR_ADMIN or USER + * who has read, write or admin privileges in order to perform respective operations on the blob. + * + * <p>

For hdfs blob store + * 1. The USER interacts with nimbus to upload and access blobs through NimbusBlobStore Client API. Here, unlike + * local blob store which stores the blobs locally, the nimbus talks to HDFS to upload the blobs. + * 2. The USER sets the ACLs, and the blob access is validated against these ACLs. + * 3. The SUPERVISOR interacts with nimbus through HdfsClientBlobStore to download the blobs. Here, unlike local + * blob store the supervisor interacts with HDFS directly to download the blobs. The call to HdfsBlobStore is made as a "null" + * subject. The blobstore gets the hadoop user and validates permissions for the supervisor. + */ +public class HdfsBlobStore extends BlobStore { + private static final Logger LOG = LoggerFactory.getLogger(HdfsBlobStore.class); + private static final String DATA_PREFIX = "data_"; + private static final String META_PREFIX = "meta_"; + + private BlobStoreAclHandler aclHandler; + private HdfsBlobStoreImpl hbs; + private Subject localSubject; + private Map conf; + private Cache cacheMetas = CacheBuilder.newBuilder().expireAfterWrite(10, TimeUnit.MINUTES).build(); + private Cache cachedReplicationCount = CacheBuilder.newBuilder().expireAfterWrite(10, TimeUnit.MINUTES).build(); + + /** + * If who is null then we want to use the user hadoop says we are. + * Required for the supervisor to call these routines as its not + * logged in as anyone. + */ + private Subject checkAndGetSubject(Subject who) { + if (who == null) { + return localSubject; + } + return who; + } + + @Override + public void prepare(Map conf, String overrideBase, NimbusInfo nimbusInfo, ILeaderElector leaderElector) { + this.conf = conf; + prepareInternal(conf, overrideBase, null); + } + + /** + * Allow a Hadoop Configuration to be passed for testing. If it's null then the hadoop configs + * must be in your classpath. 
+ */ + protected void prepareInternal(Map conf, String overrideBase, Configuration hadoopConf) { + this.conf = conf; + if (overrideBase == null) { + overrideBase = (String) conf.get(Config.BLOBSTORE_DIR); + } + if (overrideBase == null) { + throw new RuntimeException("You must specify a blobstore directory for HDFS to use!"); + } + LOG.debug("directory is: {}", overrideBase); + + //Login to hdfs + localSubject = HadoopLoginUtil.loginHadoop(conf); + + aclHandler = new BlobStoreAclHandler(conf); + Path baseDir = new Path(overrideBase, BASE_BLOBS_DIR_NAME); + try { + if (hadoopConf != null) { + hbs = new HdfsBlobStoreImpl(baseDir, conf, hadoopConf); + } else { + hbs = new HdfsBlobStoreImpl(baseDir, conf); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public AtomicOutputStream createBlob(String key, SettableBlobMeta meta, Subject who) + throws AuthorizationException, KeyAlreadyExistsException { + if (meta.get_replication_factor() <= 0) { + meta.set_replication_factor((int) conf.get(Config.STORM_BLOBSTORE_REPLICATION_FACTOR)); + } + who = checkAndGetSubject(who); + validateKey(key); + aclHandler.normalizeSettableBlobMeta(key, meta, who, READ | WRITE | ADMIN); + BlobStoreAclHandler.validateSettableACLs(key, meta.get_acl()); + aclHandler.hasPermissions(meta.get_acl(), READ | WRITE | ADMIN, who, key); + if (hbs.exists(DATA_PREFIX + key)) { + throw new WrappedKeyAlreadyExistsException(key); + } + BlobStoreFileOutputStream outputStream = null; + try { + BlobStoreFile metaFile = hbs.write(META_PREFIX + key, true); + metaFile.setMetadata(meta); + outputStream = new BlobStoreFileOutputStream(metaFile); + outputStream.write(Utils.thriftSerialize(meta)); + outputStream.close(); + outputStream = null; + BlobStoreFile dataFile = hbs.write(DATA_PREFIX + key, true); + dataFile.setMetadata(meta); + cacheMetas.put(key, meta); + return new BlobStoreFileOutputStream(dataFile); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + if (outputStream != null) { + try { + outputStream.cancel(); + } catch (IOException e) { + //Ignored + } + } + } + } + + @Override + public AtomicOutputStream updateBlob(String key, Subject who) + throws AuthorizationException, KeyNotFoundException { + who = checkAndGetSubject(who); + SettableBlobMeta meta = extractBlobMeta(key); + validateKey(key); + aclHandler.hasPermissions(meta.get_acl(), WRITE, who, key); + try { + BlobStoreFile dataFile = hbs.write(DATA_PREFIX + key, false); + dataFile.setMetadata(meta); + return new BlobStoreFileOutputStream(dataFile); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private SettableBlobMeta getStoredBlobMeta(String key) throws KeyNotFoundException { + InputStream in = null; + try { + BlobStoreFile pf = hbs.read(META_PREFIX + key); + try { + in = pf.getInputStream(); + } catch (FileNotFoundException fnf) { + throw new WrappedKeyNotFoundException(key); + } + ByteArrayOutputStream out = new ByteArrayOutputStream(); + byte[] buffer = new byte[2048]; + int len; + while ((len = in.read(buffer)) > 0) { + out.write(buffer, 0, len); + } + in.close(); + in = null; + SettableBlobMeta blobMeta = Utils.thriftDeserialize(SettableBlobMeta.class, out.toByteArray()); + return blobMeta; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + if (in != null) { + try { + in.close(); + } catch (IOException e) { + //Ignored + } + } + } + } + + @Override + public ReadableBlobMeta getBlobMeta(String key, Subject who) + throws AuthorizationException, 
KeyNotFoundException { + who = checkAndGetSubject(who); + validateKey(key); + SettableBlobMeta meta = extractBlobMeta(key); + aclHandler.validateUserCanReadMeta(meta.get_acl(), who, key); + ReadableBlobMeta rbm = new ReadableBlobMeta(); + rbm.set_settable(meta); + try { + BlobStoreFile pf = hbs.read(DATA_PREFIX + key); + rbm.set_version(pf.getModTime()); + } catch (IOException e) { + throw new RuntimeException(e); + } + return rbm; + } + + /** + * Sets leader elector (only used by LocalFsBlobStore to help sync blobs between Nimbi). + * + * @param leaderElector the leader elector + */ + @Override + public void setLeaderElector(ILeaderElector leaderElector) { + // NO-OP + } + + @Override + public void setBlobMeta(String key, SettableBlobMeta meta, Subject who) + throws AuthorizationException, KeyNotFoundException { + if (meta.get_replication_factor() <= 0) { + meta.set_replication_factor((int) conf.get(Config.STORM_BLOBSTORE_REPLICATION_FACTOR)); + } + who = checkAndGetSubject(who); + validateKey(key); + aclHandler.normalizeSettableBlobMeta(key, meta, who, ADMIN); + BlobStoreAclHandler.validateSettableACLs(key, meta.get_acl()); + SettableBlobMeta orig = extractBlobMeta(key); + aclHandler.hasPermissions(orig.get_acl(), ADMIN, who, key); + writeMetadata(key, meta); + } + + @Override + public void deleteBlob(String key, Subject who) + throws AuthorizationException, KeyNotFoundException { + who = checkAndGetSubject(who); + validateKey(key); + SettableBlobMeta meta = extractBlobMeta(key); + aclHandler.hasPermissions(meta.get_acl(), WRITE, who, key); + try { + hbs.deleteKey(DATA_PREFIX + key); + hbs.deleteKey(META_PREFIX + key); + } catch (IOException e) { + throw new RuntimeException(e); + } + cacheMetas.invalidate(key); + cachedReplicationCount.invalidate(key); + } + + @Override + public InputStreamWithMeta getBlob(String key, Subject who) + throws AuthorizationException, KeyNotFoundException { + who = checkAndGetSubject(who); + validateKey(key); + SettableBlobMeta meta = extractBlobMeta(key); + aclHandler.hasPermissions(meta.get_acl(), READ, who, key); + try { + return new BlobStoreFileInputStream(hbs.read(DATA_PREFIX + key)); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * Checks if a blob exists. 
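+ * <p>Unlike the accessors above, a missing key yields false here instead of a KeyNotFoundException; for example (key name illustrative): + * <pre>{@code + * if (!store.blobExists("topology-jar", null)) { + *     // upload the blob before the supervisor asks for it + * } + * }</pre>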
+ * + * @param key blobstore key + * @param who subject + * @return true if the blob exists, otherwise false + * @throws AuthorizationException if authorization fails + */ + public boolean blobExists(String key, Subject who) throws AuthorizationException { + try { + who = checkAndGetSubject(who); + validateKey(key); + SettableBlobMeta meta = extractBlobMeta(key); + aclHandler.hasPermissions(meta.get_acl(), READ, who, key); + } catch (KeyNotFoundException e) { + return false; + } + return true; + } + + @Override + public Iterator<String> listKeys() { + try { + return new KeyTranslationIterator(hbs.listKeys(), DATA_PREFIX); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public void shutdown() { + //Empty + } + + @Override + public int getBlobReplication(String key, Subject who) throws AuthorizationException, KeyNotFoundException { + who = checkAndGetSubject(who); + validateKey(key); + SettableBlobMeta meta = extractBlobMeta(key); + aclHandler.hasAnyPermissions(meta.get_acl(), READ | WRITE | ADMIN, who, key); + try { + Integer cachedCount = cachedReplicationCount.getIfPresent(key); + int blobReplication = 0; + if (cachedCount != null) { + blobReplication = cachedCount.intValue(); + } else { + blobReplication = hbs.getBlobReplication(DATA_PREFIX + key); + cachedReplicationCount.put(key, blobReplication); + } + return blobReplication; + } catch (IOException exp) { + throw new RuntimeException(exp); + } + } + + private SettableBlobMeta extractBlobMeta(String key) throws KeyNotFoundException { + if (key == null) { + throw new WrappedKeyNotFoundException("null cannot be a blob key"); + } + SettableBlobMeta meta = cacheMetas.getIfPresent(key); + if (meta == null) { + meta = getStoredBlobMeta(key); + cacheMetas.put(key, meta); + } + return meta; + } + + @Override + public int updateBlobReplication(String key, int replication, Subject who) throws AuthorizationException, KeyNotFoundException { + who = checkAndGetSubject(who); + validateKey(key); + SettableBlobMeta meta = extractBlobMeta(key); + meta.set_replication_factor(replication); + aclHandler.hasAnyPermissions(meta.get_acl(), WRITE | ADMIN, who, key); + try { + writeMetadata(key, meta); + int updatedReplCount = hbs.updateBlobReplication(DATA_PREFIX + key, replication); + cachedReplicationCount.put(key, updatedReplCount); + return updatedReplCount; + } catch (IOException exp) { + throw new RuntimeException(exp); + } + } + + public void writeMetadata(String key, SettableBlobMeta meta) + throws AuthorizationException, KeyNotFoundException { + BlobStoreFileOutputStream outputStream = null; + try { + BlobStoreFile hdfsFile = hbs.write(META_PREFIX + key, false); + hdfsFile.setMetadata(meta); + outputStream = new BlobStoreFileOutputStream(hdfsFile); + outputStream.write(Utils.thriftSerialize(meta)); + outputStream.close(); + outputStream = null; + cacheMetas.put(key, meta); + } catch (IOException exp) { + throw new RuntimeException(exp); + } finally { + if (outputStream != null) { + try { + outputStream.cancel(); + } catch (IOException e) { + //Ignored + } + } + } + } + + public void fullCleanup(long age) throws IOException { + hbs.fullCleanup(age); + } + + public long getLastBlobUpdateTime() throws IOException { + return hbs.getLastBlobUpdateTime(); + } + + @Override + public void updateLastBlobUpdateTime() throws IOException { + hbs.updateLastBlobUpdateTime(); + } + + @Override + public void validateBlobUpdateTime() throws IOException { + hbs.validateBlobUpdateTime(); + } +} diff --git 
a/external/storm-hdfs-blobstore/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreFile.java b/external/storm-hdfs-blobstore/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreFile.java new file mode 100644 index 00000000000..f124cdfd0fe --- /dev/null +++ b/external/storm-hdfs-blobstore/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreFile.java @@ -0,0 +1,198 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.hdfs.blobstore; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.regex.Matcher; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.storm.blobstore.BlobStoreFile; +import org.apache.storm.generated.SettableBlobMeta; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class HdfsBlobStoreFile extends BlobStoreFile { + + // files are world-wide readable and owner writable + public static final FsPermission BLOBSTORE_FILE_PERMISSION = + FsPermission.createImmutable((short) 0644); // rw-r--r-- + + private static final Logger LOG = LoggerFactory.getLogger(HdfsBlobStoreFile.class); + + private final String key; + private final boolean isTmp; + private final Path path; + private final boolean mustBeNew; + private final Configuration hadoopConf; + private final FileSystem fileSystem; + private SettableBlobMeta settableBlobMeta; + + public HdfsBlobStoreFile(Path base, String name, Configuration hconf) { + if (BLOBSTORE_DATA_FILE.equals(name)) { + isTmp = false; + } else { + Matcher m = TMP_NAME_PATTERN.matcher(name); + if (!m.matches()) { + throw new IllegalArgumentException("File name does not match '" + name + "' !~ " + TMP_NAME_PATTERN); + } + isTmp = true; + } + hadoopConf = hconf; + key = base.getName(); + path = new Path(base, name); + mustBeNew = false; + try { + fileSystem = path.getFileSystem(hadoopConf); + } catch (IOException e) { + throw new RuntimeException("Error getting filesystem for path: " + path, e); + } + } + + public HdfsBlobStoreFile(Path base, boolean isTmp, boolean mustBeNew, Configuration hconf) { + key = base.getName(); + hadoopConf = hconf; + this.isTmp = isTmp; + this.mustBeNew = mustBeNew; + if (this.isTmp) { + path = new Path(base, System.currentTimeMillis() + TMP_EXT); + } else { + path = new Path(base, BLOBSTORE_DATA_FILE); + } + try { + fileSystem = path.getFileSystem(hadoopConf); + } catch (IOException e) { + throw new RuntimeException("Error getting filesystem for path: " + path, e); + } + } + + @Override + public void delete() throws IOException { + 
fileSystem.delete(path, true); + } + + @Override + public boolean isTmp() { + return isTmp; + } + + @Override + public String getKey() { + return key; + } + + @Override + public long getModTime() throws IOException { + return fileSystem.getFileStatus(path).getModificationTime(); + } + + private void checkIsNotTmp() { + if (!isTmp()) { + throw new IllegalStateException("Can only operate on a temporary blobstore file."); + } + } + + private void checkIsTmp() { + if (isTmp()) { + throw new IllegalStateException("Cannot operate on a temporary blobstore file."); + } + } + + @Override + public InputStream getInputStream() throws IOException { + checkIsTmp(); + return fileSystem.open(path); + } + + @Override + public OutputStream getOutputStream() throws IOException { + checkIsNotTmp(); + OutputStream out = null; + FsPermission fileperms = new FsPermission(BLOBSTORE_FILE_PERMISSION); + try { + out = fileSystem.create(path, (short) this.getMetadata().get_replication_factor()); + fileSystem.setPermission(path, fileperms); + fileSystem.setReplication(path, (short) this.getMetadata().get_replication_factor()); + } catch (IOException e) { + //Try to create the parent directory, may not work + FsPermission dirperms = new FsPermission(HdfsBlobStoreImpl.BLOBSTORE_DIR_PERMISSION); + if (!fileSystem.mkdirs(path.getParent(), dirperms)) { + LOG.warn("Error creating parent dir: " + path.getParent()); + } + if (!fileSystem.getFileStatus(path.getParent()).getPermission().equals(dirperms)) { + LOG.warn("Directory {} created with unexpected permission {}. Set permission {} for this directory.", + path.getParent(), fileSystem.getFileStatus(path.getParent()).getPermission(), dirperms); + fileSystem.setPermission(path.getParent(), dirperms); + } + out = fileSystem.create(path, (short) this.getMetadata().get_replication_factor()); + fileSystem.setPermission(path, fileperms); + fileSystem.setReplication(path, (short) this.getMetadata().get_replication_factor()); + } + if (out == null) { + throw new IOException("Error in creating: " + path); + } + return out; + } + + @Override + public void commit() throws IOException { + checkIsNotTmp(); + // FileContext supports atomic rename, whereas FileSystem doesn't + FileContext fc = FileContext.getFileContext(hadoopConf); + Path dest = new Path(path.getParent(), BLOBSTORE_DATA_FILE); + if (mustBeNew) { + fc.rename(path, dest); + } else { + fc.rename(path, dest, Options.Rename.OVERWRITE); + } + // Note, we could add support for setting the replication factor + } + + @Override + public void cancel() throws IOException { + checkIsNotTmp(); + delete(); + } + + @Override + public String toString() { + return path + ":" + (isTmp ? 
"tmp" : BlobStoreFile.BLOBSTORE_DATA_FILE) + ":" + key; + } + + @Override + public long getFileLength() throws IOException { + return fileSystem.getFileStatus(path).getLen(); + } + + @Override + public SettableBlobMeta getMetadata() { + return settableBlobMeta; + } + + @Override + public void setMetadata(SettableBlobMeta meta) { + this.settableBlobMeta = meta; + } +} diff --git a/external/storm-hdfs-blobstore/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreImpl.java b/external/storm-hdfs-blobstore/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreImpl.java new file mode 100644 index 00000000000..455cdd78569 --- /dev/null +++ b/external/storm-hdfs-blobstore/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreImpl.java @@ -0,0 +1,395 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.hdfs.blobstore; + +import java.io.BufferedWriter; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Timer; +import java.util.TimerTask; + +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.storm.Config; +import org.apache.storm.blobstore.BlobStoreFile; +import org.apache.storm.utils.ObjectReader; +import org.apache.storm.utils.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * HDFS blob store impl. + */ +public class HdfsBlobStoreImpl { + + // blobstore directory is private! 
+ public static final FsPermission BLOBSTORE_DIR_PERMISSION = + FsPermission.createImmutable((short) 0700); // rwx------ + private static final String BLOBSTORE_UPDATE_TIME_FILE = "lastUpdatedBlobTime"; + + private static final Logger LOG = LoggerFactory.getLogger(HdfsBlobStoreImpl.class); + + private static final long FULL_CLEANUP_FREQ = 60 * 60 * 1000L; + private static final int BUCKETS = 1024; + private static final String BLOBSTORE_DATA = "data"; + + private Timer timer; + + private Path fullPath; + private FileSystem fileSystem; + private Configuration hadoopConf; + + public class KeyInHashDirIterator implements Iterator<String> { + private int currentBucket = 0; + private Iterator<String> it = null; + private String next = null; + + public KeyInHashDirIterator() throws IOException { + primeNext(); + } + + private void primeNext() throws IOException { + while (it == null && currentBucket < BUCKETS) { + String name = String.valueOf(currentBucket); + Path dir = new Path(fullPath, name); + try { + it = listKeys(dir); + } catch (FileNotFoundException e) { + it = null; + } + if (it == null || !it.hasNext()) { + it = null; + currentBucket++; + } else { + next = it.next(); + } + } + } + + @Override + public boolean hasNext() { + return next != null; + } + + @Override + public String next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + String current = next; + next = null; + if (it != null) { + if (!it.hasNext()) { + it = null; + currentBucket++; + try { + primeNext(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } else { + next = it.next(); + } + } + return current; + } + + @Override + public void remove() { + throw new UnsupportedOperationException("Delete Not Supported"); + } + } + + public HdfsBlobStoreImpl(Path path, Map<String, Object> conf) throws IOException { + this(path, conf, new Configuration()); + } + + public HdfsBlobStoreImpl(Path path, Map<String, Object> conf, + Configuration hconf) throws IOException { + LOG.debug("Blob store based in {}", path); + fullPath = path; + hadoopConf = hconf; + fileSystem = path.getFileSystem(hadoopConf); + + if (!fileSystem.exists(fullPath)) { + FsPermission perms = new FsPermission(BLOBSTORE_DIR_PERMISSION); + boolean success = fileSystem.mkdirs(fullPath, perms); + if (!fileSystem.getFileStatus(fullPath).getPermission().equals(perms)) { + LOG.warn("Directory {} created with unexpected permission {}. Set permission {} for this directory.", + fullPath, fileSystem.getFileStatus(fullPath).getPermission(), perms); + fileSystem.setPermission(fullPath, perms); + } + if (!success) { + throw new IOException("Error creating blobstore directory: " + fullPath); + } + } + + Object shouldCleanup = conf.get(Config.BLOBSTORE_CLEANUP_ENABLE); + if (ObjectReader.getBoolean(shouldCleanup, false)) { + LOG.debug("Starting hdfs blobstore cleaner"); + TimerTask cleanup = new TimerTask() { + @Override + public void run() { + try { + fullCleanup(FULL_CLEANUP_FREQ); + } catch (IOException e) { + LOG.error("Error trying to cleanup", e); + } + } + }; + timer = new Timer("HdfsBlobStore cleanup thread", true); + timer.scheduleAtFixedRate(cleanup, 0, FULL_CLEANUP_FREQ); + } + } + + /** + * List relevant keys. 
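+ * <p>Keys are spread across the numbered hash-bucket directories, so the returned iterator walks every bucket; a usage sketch (illustrative): + * <pre>{@code + * Iterator<String> it = blobStoreImpl.listKeys(); + * while (it.hasNext()) { + *     LOG.debug("found blob key {}", it.next()); + * } + * }</pre>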
+ * + * @return all keys that are available for reading + * @throws IOException on any error + */ + public Iterator<String> listKeys() throws IOException { + return new KeyInHashDirIterator(); + } + + protected Iterator<String> listKeys(Path path) throws IOException { + ArrayList<String> ret = new ArrayList<>(); + FileStatus[] files = fileSystem.listStatus(new Path[]{path}); + if (files != null) { + for (FileStatus sub : files) { + try { + ret.add(sub.getPath().getName().toString()); + } catch (IllegalArgumentException e) { + //Ignored; the file did not match + LOG.debug("Found an unexpected file in {} {}", path, sub.getPath().getName()); + } + } + } + return ret.iterator(); + } + + /** + * Get an input stream for reading a part. + * + * @param key the key of the part to read + * @return where to read the data from + * @throws IOException on any error + */ + public BlobStoreFile read(String key) throws IOException { + return new HdfsBlobStoreFile(getKeyDir(key), BLOBSTORE_DATA, hadoopConf); + } + + /** + * Get an object tied to writing the data. + * + * @param key the key of the part to write to. + * @param create whether the file needs to be new or not. + * @return an object that can be used to both write to, but also commit/cancel the operation. + * @throws IOException on any error + */ + public BlobStoreFile write(String key, boolean create) throws IOException { + return new HdfsBlobStoreFile(getKeyDir(key), true, create, hadoopConf); + } + + /** + * Check if the key exists in the blob store. + * + * @param key the key to check for + * @return true if it exists else false. + */ + public boolean exists(String key) { + Path dir = getKeyDir(key); + boolean res = false; + try { + fileSystem = dir.getFileSystem(hadoopConf); + res = fileSystem.exists(dir); + } catch (IOException e) { + LOG.warn("Exception checking for exists on: " + key); + } + return res; + } + + /** + * Delete a key from the blob store. 
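+ * <p>Removes the data file and then the enclosing key directory under the key's hash bucket, so a subsequent {@code exists(key)} returns false.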
+ * + * @param key the key to delete + * @throws IOException on any error + */ + public void deleteKey(String key) throws IOException { + Path keyDir = getKeyDir(key); + HdfsBlobStoreFile pf = new HdfsBlobStoreFile(keyDir, BLOBSTORE_DATA, + hadoopConf); + pf.delete(); + delete(keyDir); + } + + protected Path getKeyDir(String key) { + String hash = String.valueOf(Math.abs((long) key.hashCode()) % BUCKETS); + Path hashDir = new Path(fullPath, hash); + + Path ret = new Path(hashDir, key); + LOG.debug("{} Looking for {} in {}", new Object[]{fullPath, key, hash}); + return ret; + } + + public void fullCleanup(long age) throws IOException { + long cleanUpIfBefore = System.currentTimeMillis() - age; + Iterator<String> keys = new KeyInHashDirIterator(); + while (keys.hasNext()) { + String key = keys.next(); + Path keyDir = getKeyDir(key); + Iterator<BlobStoreFile> i = listBlobStoreFiles(keyDir); + if (!i.hasNext()) { + //The dir is empty, so try to delete it, may fail, but that is OK + try { + fileSystem.delete(keyDir, true); + } catch (Exception e) { + LOG.warn("Could not delete " + keyDir + "; will try again later"); + } + } + while (i.hasNext()) { + BlobStoreFile f = i.next(); + if (f.isTmp()) { + if (f.getModTime() <= cleanUpIfBefore) { + f.delete(); + } + } + } + } + } + + protected Iterator<BlobStoreFile> listBlobStoreFiles(Path path) throws IOException { + ArrayList<BlobStoreFile> ret = new ArrayList<>(); + FileStatus[] files = fileSystem.listStatus(new Path[]{path}); + if (files != null) { + for (FileStatus sub : files) { + try { + ret.add(new HdfsBlobStoreFile(sub.getPath().getParent(), sub.getPath().getName(), + hadoopConf)); + } catch (IllegalArgumentException e) { + //Ignored; the file did not match + LOG.warn("Found an unexpected file in {} {}", path, sub.getPath().getName()); + } + } + } + return ret.iterator(); + } + + protected int getBlobReplication(String key) throws IOException { + Path path = getKeyDir(key); + Path dest = new Path(path, BLOBSTORE_DATA); + return fileSystem.getFileStatus(dest).getReplication(); + } + + protected int updateBlobReplication(String key, int replication) throws IOException { + Path path = getKeyDir(key); + Path dest = new Path(path, BLOBSTORE_DATA); + fileSystem.setReplication(dest, (short) replication); + return fileSystem.getFileStatus(dest).getReplication(); + } + + protected void delete(Path path) throws IOException { + fileSystem.delete(path, true); + } + + public void shutdown() { + if (timer != null) { + timer.cancel(); + } + } + + /** + * Get the last update time of any blob. + * + * @return the last updated time of blobs within the blobstore. + * @throws IOException on any error + */ + public long getLastBlobUpdateTime() throws IOException { + Path updateTimeFile = new Path(fullPath, BLOBSTORE_UPDATE_TIME_FILE); + if (!fileSystem.exists(updateTimeFile)) { + return -1L; + } + FSDataInputStream inputStream = fileSystem.open(updateTimeFile); + String timestamp = IOUtils.toString(inputStream, "UTF-8"); + inputStream.close(); + try { + long updateTime = Long.parseLong(timestamp); + return updateTime; + } catch (NumberFormatException e) { + LOG.error("Invalid blobstore update time {} in file {}", timestamp, updateTimeFile); + return -1L; + } + } + + /** + * Updates the last updated time of existing blobstores to the current time. 
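+ * <p>Overwrites the shared {@code lastUpdatedBlobTime} marker file with the current time in milliseconds; {@code getLastBlobUpdateTime()} reads the same file back.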
+ * + * @throws IOException on any error + */ + public synchronized void updateLastBlobUpdateTime() throws IOException { + Long timestamp = Time.currentTimeMillis(); + Path updateTimeFile = new Path(fullPath, BLOBSTORE_UPDATE_TIME_FILE); + FSDataOutputStream fsDataOutputStream = fileSystem.create(updateTimeFile, true); + BufferedWriter bufferedWriter = new BufferedWriter(new OutputStreamWriter(fsDataOutputStream, StandardCharsets.UTF_8)); + bufferedWriter.write(timestamp.toString()); + bufferedWriter.close(); + LOG.debug("Updated blobstore update time of {} to {}", updateTimeFile, timestamp); + } + + /** + * Validates that the last updated blob time of the blobstore is up to date with the current existing blobs. + * + * @throws IOException on any error + */ + public void validateBlobUpdateTime() throws IOException { + int currentBucket = 0; + long baseModTime = 0; + while (currentBucket < BUCKETS) { + String name = String.valueOf(currentBucket); + Path bucketDir = new Path(fullPath, name); + + // only consider bucket dirs that exist with files in them + if (fileSystem.exists(bucketDir) && fileSystem.listStatus(bucketDir).length > 0) { + long modtime = fileSystem.getFileStatus(bucketDir).getModificationTime(); + if (modtime > baseModTime) { + baseModTime = modtime; + } + } + + currentBucket++; + } + if (baseModTime > 0 && baseModTime > getLastBlobUpdateTime()) { + LOG.info("Blobstore update time requires an update to at least {}", baseModTime); + updateLastBlobUpdateTime(); + } + } +} diff --git a/external/storm-hdfs-blobstore/src/main/java/org/apache/storm/hdfs/blobstore/HdfsClientBlobStore.java b/external/storm-hdfs-blobstore/src/main/java/org/apache/storm/hdfs/blobstore/HdfsClientBlobStore.java new file mode 100644 index 00000000000..fbdc1866100 --- /dev/null +++ b/external/storm-hdfs-blobstore/src/main/java/org/apache/storm/hdfs/blobstore/HdfsClientBlobStore.java @@ -0,0 +1,142 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.hdfs.blobstore; + +import java.io.IOException; +import java.util.Iterator; +import java.util.Map; + +import org.apache.storm.blobstore.AtomicOutputStream; +import org.apache.storm.blobstore.ClientBlobStore; +import org.apache.storm.blobstore.InputStreamWithMeta; +import org.apache.storm.generated.AuthorizationException; +import org.apache.storm.generated.KeyAlreadyExistsException; +import org.apache.storm.generated.KeyNotFoundException; +import org.apache.storm.generated.ReadableBlobMeta; +import org.apache.storm.generated.SettableBlobMeta; +import org.apache.storm.utils.NimbusClient; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Client to access the HDFS blobStore. At this point, this is meant to only be used by the + * supervisor. 
Don't trust who the client says they are, so pass null for all Subjects. + *

The HdfsBlobStore implementation takes care of the null Subjects. It assigns Subjects + * based on who Hadoop says the users are. These users must be configured accordingly + * in the SUPERVISOR_ADMINS for ACL validation and for the supervisors to download the blobs. + * This API is only used by the supervisor in order to talk directly to HDFS. + */ +public class HdfsClientBlobStore extends ClientBlobStore { + private static final Logger LOG = LoggerFactory.getLogger(HdfsClientBlobStore.class); + private HdfsBlobStore blobStore; + private Map<String, Object> conf; + private NimbusClient client; + + @Override + public void prepare(Map<String, Object> conf) { + this.conf = conf; + blobStore = new HdfsBlobStore(); + blobStore.prepare(conf, null, null, null); + } + + @Override + public AtomicOutputStream createBlobToExtend(String key, SettableBlobMeta meta) + throws AuthorizationException, KeyAlreadyExistsException { + return blobStore.createBlob(key, meta, null); + } + + @Override + public AtomicOutputStream updateBlob(String key) + throws AuthorizationException, KeyNotFoundException { + return blobStore.updateBlob(key, null); + } + + @Override + public ReadableBlobMeta getBlobMeta(String key) + throws AuthorizationException, KeyNotFoundException { + return blobStore.getBlobMeta(key, null); + } + + @Override + public boolean isRemoteBlobExists(String blobKey) throws AuthorizationException { + return blobStore.blobExists(blobKey, null); + } + + @Override + public void setBlobMetaToExtend(String key, SettableBlobMeta meta) + throws AuthorizationException, KeyNotFoundException { + blobStore.setBlobMeta(key, meta, null); + } + + @Override + public void deleteBlob(String key) throws AuthorizationException, KeyNotFoundException { + blobStore.deleteBlob(key, null); + } + + @Override + public InputStreamWithMeta getBlob(String key) + throws AuthorizationException, KeyNotFoundException { + return blobStore.getBlob(key, null); + } + + @Override + public Iterator<String> listKeys() { + return blobStore.listKeys(); + } + + @Override + public int getBlobReplication(String key) throws AuthorizationException, KeyNotFoundException { + return blobStore.getBlobReplication(key, null); + } + + @Override + public int updateBlobReplication(String key, int replication) throws AuthorizationException, KeyNotFoundException { + return blobStore.updateBlobReplication(key, replication, null); + } + + @Override + public boolean setClient(Map<String, Object> conf, NimbusClient client) { + this.client = client; + return true; + } + + @Override + public void createStateInZookeeper(String key) { + // Do nothing + } + + @Override + public void shutdown() { + close(); + } + + @Override + public void close() { + if (client != null) { + client.close(); + client = null; + } + } + + @Override + public long getRemoteBlobstoreUpdateTime() throws IOException { + return blobStore.getLastBlobUpdateTime(); + } +} diff --git a/external/storm-hdfs-blobstore/src/test/java/org/apache/storm/hdfs/blobstore/BlobStoreTest.java b/external/storm-hdfs-blobstore/src/test/java/org/apache/storm/hdfs/blobstore/BlobStoreTest.java new file mode 100644 index 00000000000..206e6518a0c --- /dev/null +++ b/external/storm-hdfs-blobstore/src/test/java/org/apache/storm/hdfs/blobstore/BlobStoreTest.java @@ -0,0 +1,518 @@ + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.storm.hdfs.blobstore; + +import org.apache.storm.hdfs.testing.MiniDFSClusterExtension; +import org.apache.storm.Config; +import org.apache.storm.blobstore.AtomicOutputStream; +import org.apache.storm.blobstore.BlobStore; +import org.apache.storm.blobstore.BlobStoreAclHandler; +import org.apache.storm.generated.AccessControl; +import org.apache.storm.generated.AccessControlType; +import org.apache.storm.generated.AuthorizationException; +import org.apache.storm.generated.KeyNotFoundException; +import org.apache.storm.generated.SettableBlobMeta; +import org.apache.storm.security.auth.FixedGroupsMapping; +import org.apache.storm.security.auth.NimbusPrincipal; +import org.apache.storm.security.auth.SingleUserPrincipal; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.security.auth.Subject; +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.*; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; +import org.junit.jupiter.params.provider.ValueSource; + +public class BlobStoreTest { + + @RegisterExtension + public static final MiniDFSClusterExtension DFS_CLUSTER_EXTENSION = new MiniDFSClusterExtension(); + + private static final Logger LOG = LoggerFactory.getLogger(BlobStoreTest.class); + private static final Map<String, Object> CONF = new HashMap<>(); + public static final int READ = 0x01; + public static final int ADMIN = 0x04; + + @BeforeEach + public void init() { + initializeConfigs(); + } + + @AfterEach + public void cleanup() { + } + + // Method which initializes nimbus admin + public static void initializeConfigs() { + CONF.put(Config.NIMBUS_ADMINS, "admin"); + CONF.put(Config.NIMBUS_ADMINS_GROUPS, "adminsGroup"); + + // Construct a groups mapping for the FixedGroupsMapping class + Map<String, Set<String>> groupsMapping = new HashMap<>(); + Set<String> groupSet = new HashSet<>(); + groupSet.add("adminsGroup"); + groupsMapping.put("adminsGroupsUser", groupSet); + + // Now create a params map to put it in to our conf + Map<String, Object> paramMap = new HashMap<>(); + paramMap.put(FixedGroupsMapping.STORM_FIXED_GROUP_MAPPING, groupsMapping); + CONF.put(Config.STORM_GROUP_MAPPING_SERVICE_PROVIDER_PLUGIN, "org.apache.storm.security.auth.FixedGroupsMapping"); + CONF.put(Config.STORM_GROUP_MAPPING_SERVICE_PARAMS, paramMap); + CONF.put(Config.NIMBUS_SUPERVISOR_USERS, "supervisor"); + } + + //Gets Nimbus Subject with NimbusPrincipal set on it + public static Subject getNimbusSubject() { + Subject nimbus = new Subject(); + nimbus.getPrincipals().add(new 
NimbusPrincipal()); + return nimbus; + } + + // Overloading the assertStoreHasExactly method to accommodate Subject in order to check for authorization + public static void assertStoreHasExactly(BlobStore store, Subject who, String... keys) { + Set<String> expected = new HashSet<>(Arrays.asList(keys)); + Set<String> found = new HashSet<>(); + Iterator<String> c = store.listKeys(); + while (c.hasNext()) { + String keyName = c.next(); + found.add(keyName); + } + Set<String> extra = new HashSet<>(found); + extra.removeAll(expected); + assertTrue(extra.isEmpty(), "Found extra keys in the blob store " + extra); + Set<String> missing = new HashSet<>(expected); + missing.removeAll(found); + assertTrue(missing.isEmpty(), "Found keys missing from the blob store " + missing); + } + + public static void assertStoreHasExactly(BlobStore store, String... keys) { + assertStoreHasExactly(store, null, keys); + } + + // Overloading the readInt method to accommodate Subject in order to check for authorization (security turned on) + public static int readInt(BlobStore store, Subject who, String key) throws IOException, KeyNotFoundException, AuthorizationException { + try (InputStream in = store.getBlob(key, who)) { + return in.read(); + } + } + + public static int readInt(BlobStore store, String key) + throws IOException, KeyNotFoundException, AuthorizationException { + return readInt(store, null, key); + } + + public static void readAssertEquals(BlobStore store, String key, int value) + throws IOException, KeyNotFoundException, AuthorizationException { + assertEquals(value, readInt(store, key)); + } + + // Checks for assertion when we turn on security + public void readAssertEqualsWithAuth(BlobStore store, Subject who, String key, int value) + throws IOException, KeyNotFoundException, AuthorizationException { + assertEquals(value, readInt(store, who, key)); + } + + private AutoCloseableBlobStoreContainer initHdfs(String dirName) { + Map<String, Object> conf = new HashMap<>(); + conf.put(Config.BLOBSTORE_DIR, dirName); + conf.put(Config.STORM_PRINCIPAL_TO_LOCAL_PLUGIN, "org.apache.storm.security.auth.DefaultPrincipalToLocal"); + conf.put(Config.STORM_BLOBSTORE_REPLICATION_FACTOR, 3); + HdfsBlobStore store = new HdfsBlobStore(); + store.prepareInternal(conf, null, DFS_CLUSTER_EXTENSION.getDfscluster().getConfiguration(0)); + return new AutoCloseableBlobStoreContainer(store); + } + + private static class AutoCloseableBlobStoreContainer implements AutoCloseable { + + private final HdfsBlobStore blobStore; + + public AutoCloseableBlobStoreContainer(HdfsBlobStore blobStore) { + this.blobStore = blobStore; + } + + @Override + public void close() { + this.blobStore.shutdown(); + } + + } + + @Test + public void testHdfsReplication() + throws Exception { + try (AutoCloseableBlobStoreContainer container = initHdfs("/storm/blobstoreReplication")) { + testReplication("/storm/blobstoreReplication/test", container.blobStore); + } + } + + @Test + public void testBasicHdfs() + throws Exception { + try (AutoCloseableBlobStoreContainer container = initHdfs("/storm/blobstore1")) { + testBasic(container.blobStore); + } + } + + @Test + public void testMultipleHdfs() + throws Exception { + // use different blobstore dir, so it doesn't conflict with other test + try (AutoCloseableBlobStoreContainer container = initHdfs("/storm/blobstore2")) { + testMultiple(container.blobStore); + } + } + + // Test for replication. 
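+ // Exercises creating, reading, and updating the replication factor as the creator, a nimbus admin, + // a group-mapped admin, and the supervisor, then checks the same operations against explicit per-user ACLs.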
+ public void testReplication(String path, BlobStore store) + throws Exception { + SettableBlobMeta metadata = new SettableBlobMeta(BlobStoreAclHandler.WORLD_EVERYTHING); + metadata.set_replication_factor(4); + try (AtomicOutputStream out = store.createBlob("test", metadata, null)) { + out.write(1); + } + assertStoreHasExactly(store, "test"); + assertEquals(store.getBlobReplication("test", null), 4, "Blobstore replication not matching"); + store.deleteBlob("test", null); + + //Test for replication with NIMBUS as user + Subject admin = getSubject("admin"); + metadata = new SettableBlobMeta(BlobStoreAclHandler.DEFAULT); + metadata.set_replication_factor(4); + try (AtomicOutputStream out = store.createBlob("test", metadata, admin)) { + out.write(1); + } + assertStoreHasExactly(store, "test"); + assertEquals(store.getBlobReplication("test", admin), 4, "Blobstore replication not matching"); + store.updateBlobReplication("test", 5, admin); + assertEquals(store.getBlobReplication("test", admin), 5, "Blobstore replication not matching"); + store.deleteBlob("test", admin); + + //Test for replication using SUPERVISOR access + Subject supervisor = getSubject("supervisor"); + metadata = new SettableBlobMeta(BlobStoreAclHandler.DEFAULT); + metadata.set_replication_factor(4); + try (AtomicOutputStream out = store.createBlob("test", metadata, supervisor)) { + out.write(1); + } + assertStoreHasExactly(store, "test"); + assertEquals(store.getBlobReplication("test", supervisor), 4, "Blobstore replication not matching"); + store.updateBlobReplication("test", 5, supervisor); + assertEquals(store.getBlobReplication("test", supervisor), 5, "Blobstore replication not matching"); + store.deleteBlob("test", supervisor); + + Subject adminsGroupsUser = getSubject("adminsGroupsUser"); + metadata = new SettableBlobMeta(BlobStoreAclHandler.DEFAULT); + metadata.set_replication_factor(4); + try (AtomicOutputStream out = store.createBlob("test", metadata, adminsGroupsUser)) { + out.write(1); + } + assertStoreHasExactly(store, "test"); + assertEquals(store.getBlobReplication("test", adminsGroupsUser), 4, "Blobstore replication not matching"); + store.updateBlobReplication("test", 5, adminsGroupsUser); + assertEquals(store.getBlobReplication("test", adminsGroupsUser), 5, "Blobstore replication not matching"); + store.deleteBlob("test", adminsGroupsUser); + + //Test for a user having read or write or admin access to read replication for a blob + String createSubject = "createSubject"; + String writeSubject = "writeSubject"; + String adminSubject = "adminSubject"; + Subject who = getSubject(createSubject); + AccessControl writeAccess = new AccessControl(AccessControlType.USER, READ); + AccessControl adminAccess = new AccessControl(AccessControlType.USER, ADMIN); + writeAccess.set_name(writeSubject); + adminAccess.set_name(adminSubject); + List acl = Arrays.asList(writeAccess, adminAccess); + metadata = new SettableBlobMeta(acl); + metadata.set_replication_factor(4); + try (AtomicOutputStream out = store.createBlob("test", metadata, who)) { + out.write(1); + } + assertStoreHasExactly(store, "test"); + who = getSubject(writeSubject); + assertEquals(store.getBlobReplication("test", who), 4, "Blobstore replication not matching"); + + //Test for a user having WRITE or ADMIN privileges to change replication of a blob + who = getSubject(adminSubject); + store.updateBlobReplication("test", 5, who); + assertEquals(store.getBlobReplication("test", who), 5, "Blobstore replication not matching"); + store.deleteBlob("test", 
getSubject(createSubject)); + } + + public static Subject getSubject(String name) { + Subject subject = new Subject(); + SingleUserPrincipal user = new SingleUserPrincipal(name); + subject.getPrincipals().add(user); + return subject; + } + + enum AuthenticationTestSubject { + //Nimbus Admin + ADMIN(getSubject("admin")), + //Nimbus groups admin + ADMIN_GROUPS_USER(getSubject("adminGroupsUser")), + //Supervisor admin + SUPERVISOR(getSubject("supervisor")), + //Nimbus itself + NIMBUS(getNimbusSubject()); + + private final Subject subject; + + AuthenticationTestSubject(Subject subject) { + this.subject = subject; + } + } + + @ParameterizedTest + @EnumSource(value = AuthenticationTestSubject.class) + void testWithAuthentication(AuthenticationTestSubject testSubject) throws Exception { + try (AutoCloseableBlobStoreContainer container = initHdfs("/storm/blobstore-auth-" + testSubject.name())) { + BlobStore store = container.blobStore; + assertStoreHasExactly(store); + SettableBlobMeta metadata = new SettableBlobMeta(BlobStoreAclHandler.DEFAULT); + try (AtomicOutputStream out = store.createBlob("test", metadata, testSubject.subject)) { + assertStoreHasExactly(store, "test"); + out.write(1); + } + store.deleteBlob("test", testSubject.subject); + } + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void testWithAuthenticationDummy(boolean securityEnabled) throws Exception { + try (AutoCloseableBlobStoreContainer container = initHdfs("/storm/blobstore-auth-dummy-sec-" + securityEnabled)) { + BlobStore store = container.blobStore; + Subject who = getSubject("test_subject"); + assertStoreHasExactly(store); + + // Tests for case when subject != null (security turned on) and + // acls for the blob are set to WORLD_EVERYTHING + SettableBlobMeta metadata = new SettableBlobMeta(securityEnabled ? BlobStoreAclHandler.DEFAULT : BlobStoreAclHandler.WORLD_EVERYTHING); + try (AtomicOutputStream out = store.createBlob("test", metadata, who)) { + out.write(1); + } + assertStoreHasExactly(store, "test"); + if (securityEnabled) { + // Testing whether acls are set to WORLD_EVERYTHING. Here the acl should not contain WORLD_EVERYTHING because + // the subject is neither null nor empty. 
The ACL should however contain USER_EVERYTHING as user needs to have + // complete access to the blob + assertFalse(metadata.toString().contains("AccessControl(type:OTHER, access:7)"), "ACL contains WORLD_EVERYTHING"); + } else { + // Testing whether acls are set to WORLD_EVERYTHING + assertTrue(metadata.toString().contains("AccessControl(type:OTHER, access:7)"), "ACL does not contain WORLD_EVERYTHING"); + } + + readAssertEqualsWithAuth(store, who, "test", 1); + + LOG.info("Deleting test"); + store.deleteBlob("test", who); + assertStoreHasExactly(store); + } + } + + @Test + void testWithAuthenticationUpdate() throws Exception { + try (AutoCloseableBlobStoreContainer container = initHdfs("/storm/blobstore-auth-update")) { + BlobStore store = container.blobStore; + Subject who = getSubject("test_subject"); + assertStoreHasExactly(store); + + SettableBlobMeta metadata = new SettableBlobMeta(BlobStoreAclHandler.DEFAULT); + try (AtomicOutputStream out = store.createBlob("test", metadata, who)) { + out.write(1); + } + assertStoreHasExactly(store, "test"); + readAssertEqualsWithAuth(store, who, "test", 1); + + try (AtomicOutputStream out = store.updateBlob("test", who)) { + out.write(2); + } + assertStoreHasExactly(store, "test"); + readAssertEqualsWithAuth(store, who, "test", 2); + + try (AtomicOutputStream out = store.updateBlob("test", who)) { + out.write(3); + } + assertStoreHasExactly(store, "test"); + readAssertEqualsWithAuth(store, who, "test", 3); + + LOG.info("Deleting test"); + store.deleteBlob("test", who); + assertStoreHasExactly(store); + } + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void testWithAuthenticationNoPrincipal(boolean securityEnabled) throws Exception { + try (AutoCloseableBlobStoreContainer container = initHdfs("/storm/blobstore-auth-no-principal-sec-" + securityEnabled)) { + BlobStore store = container.blobStore; + //Test for subject with no principals + Subject who = new Subject(); + assertStoreHasExactly(store); + + // Tests for case when subject != null (security turned on) and + // acls for the blob are set to WORLD_EVERYTHING + SettableBlobMeta metadata = new SettableBlobMeta(securityEnabled ? 
BlobStoreAclHandler.DEFAULT : BlobStoreAclHandler.WORLD_EVERYTHING); + try (AtomicOutputStream out = store.createBlob("test", metadata, who)) { + out.write(1); + } + assertStoreHasExactly(store, "test"); + // With no principals in the subject, the ACL should always be set to WORLD_EVERYTHING + assertTrue(metadata.toString().contains("AccessControl(type:OTHER, access:7)"), "ACL does not contain WORLD_EVERYTHING"); + + readAssertEqualsWithAuth(store, who, "test", 1); + } + } + + public void testBasic(BlobStore store) + throws Exception { + assertStoreHasExactly(store); + LOG.info("Creating test"); + // Tests for case when subject == null (security turned off) and + // acls for the blob are set to WORLD_EVERYTHING + SettableBlobMeta metadata = new SettableBlobMeta(BlobStoreAclHandler.WORLD_EVERYTHING); + try (AtomicOutputStream out = store.createBlob("test", metadata, null)) { + out.write(1); + } + assertStoreHasExactly(store, "test"); + // Testing whether acls are set to WORLD_EVERYTHING + assertTrue(metadata.toString().contains("AccessControl(type:OTHER, access:7)"), "ACL does not contain WORLD_EVERYTHING"); + readAssertEquals(store, "test", 1); + + LOG.info("Deleting test"); + store.deleteBlob("test", null); + assertStoreHasExactly(store); + + // The following tests are run for both hdfs and local store to test the + // update blob interface + metadata = new SettableBlobMeta(BlobStoreAclHandler.WORLD_EVERYTHING); + LOG.info("Creating test again"); + try (AtomicOutputStream out = store.createBlob("test", metadata, null)) { + out.write(2); + } + assertStoreHasExactly(store, "test"); + readAssertEquals(store, "test", 2); + LOG.info("Updating test"); + try (AtomicOutputStream out = store.updateBlob("test", null)) { + out.write(3); + } + assertStoreHasExactly(store, "test"); + readAssertEquals(store, "test", 3); + + LOG.info("Updating test again"); + try (AtomicOutputStream out = store.updateBlob("test", null)) { + out.write(4); + } + LOG.info("SLEEPING"); + Thread.sleep(2); + + if (store instanceof HdfsBlobStore) { + ((HdfsBlobStore) store).fullCleanup(1); + } else { + fail("Error: the blobstore is of unknown type"); + } + } + + public void testMultiple(BlobStore store) + throws Exception { + assertStoreHasExactly(store); + LOG.info("Creating test"); + try (AtomicOutputStream out = store.createBlob("test", new SettableBlobMeta(BlobStoreAclHandler.WORLD_EVERYTHING), null)) { + out.write(1); + } + assertStoreHasExactly(store, "test"); + readAssertEquals(store, "test", 1); + + LOG.info("Creating other"); + try (AtomicOutputStream out = store.createBlob("other", new SettableBlobMeta(BlobStoreAclHandler.WORLD_EVERYTHING), + null)) { + out.write(2); + } + assertStoreHasExactly(store, "test", "other"); + readAssertEquals(store, "test", 1); + readAssertEquals(store, "other", 2); + + LOG.info("Updating other"); + try (AtomicOutputStream out = store.updateBlob("other", null)) { + out.write(5); + } + assertStoreHasExactly(store, "test", "other"); + readAssertEquals(store, "test", 1); + readAssertEquals(store, "other", 5); + + LOG.info("Deleting test"); + store.deleteBlob("test", null); + assertStoreHasExactly(store, "other"); + readAssertEquals(store, "other", 5); + + LOG.info("Creating test again"); + try (AtomicOutputStream out = store.createBlob("test", new SettableBlobMeta(BlobStoreAclHandler.WORLD_EVERYTHING), + null)) { + out.write(2); + } + assertStoreHasExactly(store, "test", "other"); + readAssertEquals(store, "test", 2); + readAssertEquals(store, "other", 5); + + LOG.info("Updating test"); + try 
(AtomicOutputStream out = store.updateBlob("test", null)) { + out.write(3); + } + assertStoreHasExactly(store, "test", "other"); + readAssertEquals(store, "test", 3); + readAssertEquals(store, "other", 5); + + LOG.info("Deleting other"); + store.deleteBlob("other", null); + assertStoreHasExactly(store, "test"); + readAssertEquals(store, "test", 3); + + LOG.info("Updating test again"); + try (AtomicOutputStream out = store.updateBlob("test", null)) { + out.write(4); + } + LOG.info("SLEEPING"); + Thread.sleep(2); + + if (store instanceof HdfsBlobStore) { + ((HdfsBlobStore) store).fullCleanup(1); + } else { + fail("Error: the blobstore is of unknown type"); + } + assertStoreHasExactly(store, "test"); + readAssertEquals(store, "test", 4); + } +} diff --git a/external/storm-hdfs-blobstore/src/test/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreImplTest.java b/external/storm-hdfs-blobstore/src/test/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreImplTest.java new file mode 100644 index 00000000000..f596e4591df --- /dev/null +++ b/external/storm-hdfs-blobstore/src/test/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreImplTest.java @@ -0,0 +1,316 @@ + +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.storm.hdfs.blobstore; + +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.storm.blobstore.BlobStoreFile; +import org.apache.storm.generated.SettableBlobMeta; +import org.apache.storm.hdfs.testing.MiniDFSClusterExtensionClassLevel; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.*; + +public class HdfsBlobStoreImplTest { + + @RegisterExtension + public static final MiniDFSClusterExtensionClassLevel DFS_CLUSTER_EXTENSION = new MiniDFSClusterExtensionClassLevel(); + + private static final Logger LOG = LoggerFactory.getLogger(HdfsBlobStoreImplTest.class); + public static final String CONCURRENT_TEST_KEY_PREFIX = "concurrent-test-key"; + + // key dir needs to be number 0 to number of buckets, choose one so we know where to look + private static final String KEYDIR = "0"; + private final Path blobDir = new Path("/storm/blobstore1"); + private final Path fullKeyDir = new Path(blobDir, KEYDIR); + private final String BLOBSTORE_DATA = "data"; + // for concurrent test + private Path concurrentTestBlobDir = new Path("/storm/blobstore2"); + private Path concurrentTestFullKeyDir = new Path(concurrentTestBlobDir, KEYDIR); + + public class TestHdfsBlobStoreImpl extends HdfsBlobStoreImpl implements AutoCloseable { + + Path basePath; + public TestHdfsBlobStoreImpl(Path path, Map conf) throws IOException { + super(path, conf); + basePath = path; + } + + public TestHdfsBlobStoreImpl(Path path, Map conf, + Configuration hconf) throws IOException { + super(path, conf, hconf); + basePath = path; + } + + @Override + protected Path getKeyDir(String key) { + return new Path(new Path(basePath, KEYDIR), key); + } + + @Override + public void close() { + this.shutdown(); + } + } + + // Be careful about adding additional tests as the dfscluster will be shared + @Test + public void testMultiple() throws Exception { + String testString = "testingblob"; + String validKey = "validkeyBasic"; + + //Will be closed automatically when shutting down the DFS cluster + FileSystem fs = DFS_CLUSTER_EXTENSION.getDfscluster().getFileSystem(); + Map conf = new HashMap<>(); + + try (TestHdfsBlobStoreImpl hbs = new TestHdfsBlobStoreImpl(blobDir, conf, DFS_CLUSTER_EXTENSION.getHadoopConf())) { + // should have created blobDir + assertTrue(fs.exists(blobDir), "BlobStore dir wasn't created"); + assertEquals(HdfsBlobStoreImpl.BLOBSTORE_DIR_PERMISSION, fs.getFileStatus(blobDir).getPermission(), + "BlobStore dir was created with wrong permissions"); + + // test exist with non-existent key + assertFalse(hbs.exists("bogus"), "file exists but shouldn't"); + + // test write + BlobStoreFile pfile = hbs.write(validKey, false); + // Adding metadata to avoid null pointer exception + SettableBlobMeta meta = new SettableBlobMeta(); + meta.set_replication_factor(1); + pfile.setMetadata(meta); + try (OutputStream ios = pfile.getOutputStream()) { + ios.write(testString.getBytes(StandardCharsets.UTF_8)); + } + + // test modTime can change + long initialModTime = pfile.getModTime(); + try 
(OutputStream ios = pfile.getOutputStream()) { + ios.write(testString.getBytes(StandardCharsets.UTF_8)); + } + long nextModTime = pfile.getModTime(); + assertTrue(nextModTime > initialModTime); + + // test commit creates properly + assertTrue(fs.exists(fullKeyDir), "BlobStore key dir wasn't created"); + pfile.commit(); + Path dataFile = new Path(new Path(fullKeyDir, validKey), BLOBSTORE_DATA); + assertTrue(fs.exists(dataFile), "blob data not committed"); + assertEquals(HdfsBlobStoreFile.BLOBSTORE_FILE_PERMISSION, fs.getFileStatus(dataFile).getPermission(), + "BlobStore dir was created with wrong permissions"); + assertTrue(hbs.exists(validKey), "key doesn't exist but should"); + + // test read + BlobStoreFile readpFile = hbs.read(validKey); + try (InputStream inStream = readpFile.getInputStream()) { + String readString = IOUtils.toString(inStream, StandardCharsets.UTF_8); + assertEquals(testString, readString, "string read from blob doesn't match"); + } + + // test listkeys + Iterator<String> keys = hbs.listKeys(); + assertTrue(keys.hasNext(), "blob has one key"); + assertEquals(validKey, keys.next(), "one key in blobstore"); + + // delete + hbs.deleteKey(validKey); + assertFalse(fs.exists(dataFile), "key not deleted"); + assertFalse(hbs.exists(validKey), "key not deleted"); + + // Now do multiple + String testString2 = "testingblob2"; + String validKey2 = "validkey2"; + + // test write + pfile = hbs.write(validKey, false); + pfile.setMetadata(meta); + try (OutputStream ios = pfile.getOutputStream()) { + ios.write(testString.getBytes(StandardCharsets.UTF_8)); + } + + // test commit creates properly + assertTrue(fs.exists(fullKeyDir), "BlobStore key dir wasn't created"); + pfile.commit(); + assertTrue(fs.exists(dataFile), "blob data not committed"); + assertEquals(HdfsBlobStoreFile.BLOBSTORE_FILE_PERMISSION, fs.getFileStatus(dataFile).getPermission(), + "BlobStore dir was created with wrong permissions"); + assertTrue(hbs.exists(validKey), "key doesn't exist but should"); + + // test write again + pfile = hbs.write(validKey2, false); + pfile.setMetadata(meta); + try (OutputStream ios2 = pfile.getOutputStream()) { + ios2.write(testString2.getBytes(StandardCharsets.UTF_8)); + } + + // test commit second creates properly + pfile.commit(); + Path dataFile2 = new Path(new Path(fullKeyDir, validKey2), BLOBSTORE_DATA); + assertTrue(fs.exists(dataFile2), "blob data not committed"); + assertEquals(HdfsBlobStoreFile.BLOBSTORE_FILE_PERMISSION, fs.getFileStatus(dataFile2).getPermission(), + "BlobStore dir was created with wrong permissions"); + assertTrue(hbs.exists(validKey2), "key doesn't exist but should"); + + // test listkeys + keys = hbs.listKeys(); + int total = 0; + boolean key1Found = false; + boolean key2Found = false; + while (keys.hasNext()) { + total++; + String key = keys.next(); + if (key.equals(validKey)) { + key1Found = true; + } else if (key.equals(validKey2)) { + key2Found = true; + } else { + fail("Found key that wasn't expected: " + key); + } + } + assertEquals(2, total, "number of keys is wrong"); + assertTrue(key1Found, "blobstore missing key1"); + assertTrue(key2Found, "blobstore missing key2"); + + // test read + readpFile = hbs.read(validKey); + try (InputStream inStream = readpFile.getInputStream()) { + String readString = IOUtils.toString(inStream, StandardCharsets.UTF_8); + assertEquals(testString, readString, "string read from blob doesn't match"); + } + + // test read + readpFile = hbs.read(validKey2); + try (InputStream inStream = readpFile.getInputStream()) { + String 
+                String readString = IOUtils.toString(inStream, StandardCharsets.UTF_8);
+                assertEquals(testString2, readString, "string read from blob doesn't match");
+            }
+
+            hbs.deleteKey(validKey);
+            assertFalse(hbs.exists(validKey), "key not deleted");
+            hbs.deleteKey(validKey2);
+            assertFalse(hbs.exists(validKey2), "key not deleted");
+        }
+    }
+
+    @Test
+    public void testGetFileLength() throws Exception {
+        Map<String, Object> conf = new HashMap<>();
+        String validKey = "validkeyBasic";
+        String testString = "testingblob";
+        try (TestHdfsBlobStoreImpl hbs = new TestHdfsBlobStoreImpl(blobDir, conf, DFS_CLUSTER_EXTENSION.getHadoopConf())) {
+            BlobStoreFile pfile = hbs.write(validKey, false);
+            // Adding metadata to avoid null pointer exception
+            SettableBlobMeta meta = new SettableBlobMeta();
+            meta.set_replication_factor(1);
+            pfile.setMetadata(meta);
+            try (OutputStream ios = pfile.getOutputStream()) {
+                ios.write(testString.getBytes(StandardCharsets.UTF_8));
+            }
+            assertEquals(testString.getBytes(StandardCharsets.UTF_8).length, pfile.getFileLength());
+        }
+    }
+
+    /**
+     * Test listing keys via {@link HdfsBlobStoreImpl#listKeys()} from multiple concurrent threads and verify that the
+     * same keys are retrieved in all threads without any exceptions.
+     */
+    @Test
+    public void testConcurrentIteration() throws Exception {
+        int concurrency = 100;
+        int keyCount = 10;
+
+        class ConcurrentListerRunnable implements Runnable {
+            TestHdfsBlobStoreImpl hbs;
+            int instanceNum;
+            List<String> keys = new ArrayList<>();
+
+            public ConcurrentListerRunnable(TestHdfsBlobStoreImpl hbs, int instanceNum) {
+                this.hbs = hbs;
+                this.instanceNum = instanceNum;
+            }
+
+            @Override
+            public void run() {
+                try {
+                    Iterator<String> iterator = hbs.listKeys(concurrentTestFullKeyDir);
+                    while (iterator.hasNext()) {
+                        keys.add(iterator.next());
+                    }
+                } catch (Exception ex) {
+                    ex.printStackTrace();
+                }
+            }
+        }
+
+        Map<String, Object> conf = new HashMap<>();
+        try (TestHdfsBlobStoreImpl hbs = new TestHdfsBlobStoreImpl(concurrentTestBlobDir, conf, DFS_CLUSTER_EXTENSION.getHadoopConf())) {
+            // write the test keys
+            for (int i = 0 ; i < keyCount ; i++) {
+                String key = CONCURRENT_TEST_KEY_PREFIX + i;
+                String val = "This is string " + i;
+                BlobStoreFile pfile = hbs.write(key, false);
+                SettableBlobMeta meta = new SettableBlobMeta();
+                meta.set_replication_factor(1);
+                pfile.setMetadata(meta);
+                try (OutputStream ios = pfile.getOutputStream()) {
+                    ios.write(val.getBytes(StandardCharsets.UTF_8));
+                }
+            }
+
+            ConcurrentListerRunnable[] runnables = new ConcurrentListerRunnable[concurrency];
+            Thread[] threads = new Thread[concurrency];
+            for (int i = 0 ; i < concurrency ; i++) {
+                runnables[i] = new ConcurrentListerRunnable(hbs, i);
+                threads[i] = new Thread(runnables[i]);
+            }
+            for (int i = 0 ; i < concurrency ; i++) {
+                threads[i].start();
+            }
+            for (int i = 0 ; i < concurrency ; i++) {
+                threads[i].join();
+            }
+            List<String> keys = runnables[0].keys;
+            assertEquals(keyCount, keys.size(), "Number of keys (values=" + keys + ")");
+            for (int i = 1 ; i < concurrency ; i++) {
+                ConcurrentListerRunnable otherRunnable = runnables[i];
+                assertEquals(keys, otherRunnable.keys);
+            }
+            for (int i = 0 ; i < keyCount ; i++) {
+                String key = CONCURRENT_TEST_KEY_PREFIX + i;
+                hbs.deleteKey(key);
+            }
+            // use SLF4J placeholders rather than printf-style format specifiers
+            LOG.info("All {} threads have {} keys=[{}]", concurrency, keys.size(), String.join(",", keys));
+        }
+    }
+}
diff --git a/external/storm-hdfs-blobstore/src/test/java/org/apache/storm/hdfs/testing/MiniDFSClusterExtension.java
b/external/storm-hdfs-blobstore/src/test/java/org/apache/storm/hdfs/testing/MiniDFSClusterExtension.java new file mode 100644 index 00000000000..8bf6b0b3c5b --- /dev/null +++ b/external/storm-hdfs-blobstore/src/test/java/org/apache/storm/hdfs/testing/MiniDFSClusterExtension.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.storm.hdfs.testing; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.junit.jupiter.api.extension.AfterEachCallback; +import org.junit.jupiter.api.extension.BeforeEachCallback; +import org.junit.jupiter.api.extension.ExtensionContext; + +import java.io.File; +import java.util.function.Supplier; + +import static org.apache.hadoop.test.GenericTestUtils.DEFAULT_TEST_DATA_DIR; +import static org.apache.hadoop.test.GenericTestUtils.SYSPROP_TEST_DATA_DIR; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class MiniDFSClusterExtension implements BeforeEachCallback, AfterEachCallback { + + private static final String TEST_BUILD_DATA = "test.build.data"; + + private final Supplier hadoopConfSupplier; + private Configuration hadoopConf; + private MiniDFSCluster dfscluster; + + public MiniDFSClusterExtension() { + this(() -> new Configuration()); + } + + public MiniDFSClusterExtension(Supplier hadoopConfSupplier) { + this.hadoopConfSupplier = hadoopConfSupplier; + } + + public Configuration getHadoopConf() { + return hadoopConf; + } + + public MiniDFSCluster getDfscluster() { + return dfscluster; + } + + @Override + public void beforeEach(ExtensionContext arg0) throws Exception { + System.setProperty(TEST_BUILD_DATA, "target/test/data"); + hadoopConf = hadoopConfSupplier.get(); + String tempDir = getTestDir("dfs").getAbsolutePath() + File.separator; + hadoopConf.set("hdfs.minidfs.basedir", tempDir); + dfscluster = new MiniDFSCluster.Builder(hadoopConf).numDataNodes(3).build(); + dfscluster.waitActive(); + } + + @Override + public void afterEach(ExtensionContext arg0) throws Exception { + dfscluster.shutdown(); + System.clearProperty(TEST_BUILD_DATA); + } + + /** + * Get an uncreated directory for tests. + * We use this method to get rid of getTestDir() in GenericTestUtils in Hadoop code + * which uses assert from junit4. + * @return the absolute directory for tests. Caller is expected to create it. + */ + public static File getTestDir(String subdir) { + return new File(getTestDir(), subdir).getAbsoluteFile(); + } + + /** + * Get the (created) base directory for tests. 
+ * @return the absolute directory + */ + public static File getTestDir() { + String prop = System.getProperty(SYSPROP_TEST_DATA_DIR, DEFAULT_TEST_DATA_DIR); + if (prop.isEmpty()) { + // corner case: property is there but empty + prop = DEFAULT_TEST_DATA_DIR; + } + File dir = new File(prop).getAbsoluteFile(); + dir.mkdirs(); + assertTrue(dir.exists(), "File " + dir + " should exist"); + return dir; + } +} diff --git a/external/storm-hdfs-blobstore/src/test/java/org/apache/storm/hdfs/testing/MiniDFSClusterExtensionClassLevel.java b/external/storm-hdfs-blobstore/src/test/java/org/apache/storm/hdfs/testing/MiniDFSClusterExtensionClassLevel.java new file mode 100644 index 00000000000..1fd13d930ce --- /dev/null +++ b/external/storm-hdfs-blobstore/src/test/java/org/apache/storm/hdfs/testing/MiniDFSClusterExtensionClassLevel.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.storm.hdfs.testing; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.junit.jupiter.api.extension.AfterAllCallback; +import org.junit.jupiter.api.extension.BeforeAllCallback; +import org.junit.jupiter.api.extension.ExtensionContext; + +import java.io.File; +import java.util.function.Supplier; + +import static org.apache.storm.hdfs.testing.MiniDFSClusterExtension.getTestDir; + +public class MiniDFSClusterExtensionClassLevel implements BeforeAllCallback, AfterAllCallback { + + private static final String TEST_BUILD_DATA = "test.build.data"; + + private final Supplier hadoopConfSupplier; + private Configuration hadoopConf; + private MiniDFSCluster dfscluster; + + public MiniDFSClusterExtensionClassLevel() { + this(() -> new Configuration()); + } + + public MiniDFSClusterExtensionClassLevel(Supplier hadoopConfSupplier) { + this.hadoopConfSupplier = hadoopConfSupplier; + } + + public Configuration getHadoopConf() { + return hadoopConf; + } + + public MiniDFSCluster getDfscluster() { + return dfscluster; + } + + @Override + public void beforeAll(ExtensionContext arg0) throws Exception { + System.setProperty(TEST_BUILD_DATA, "target/test/data"); + hadoopConf = hadoopConfSupplier.get(); + String tempDir = getTestDir("dfs").getAbsolutePath() + File.separator; + hadoopConf.set("hdfs.minidfs.basedir", tempDir); + dfscluster = new MiniDFSCluster.Builder(hadoopConf).numDataNodes(3).build(); + dfscluster.waitActive(); + } + + @Override + public void afterAll(ExtensionContext arg0) throws Exception { + dfscluster.shutdown(); + System.clearProperty(TEST_BUILD_DATA); + } +} diff --git a/external/storm-hdfs-oci/pom.xml b/external/storm-hdfs-oci/pom.xml new file mode 100644 index 00000000000..26c6f37d0ca --- /dev/null +++ b/external/storm-hdfs-oci/pom.xml @@ -0,0 +1,132 @@ + + + + + storm + 
org.apache.storm + 2.8.4-SNAPSHOT + ../../pom.xml + + 4.0.0 + + storm-hdfs-oci + + + + org.slf4j + slf4j-api + + + org.apache.storm + storm-server + ${project.version} + ${provided.scope} + + + + org.slf4j + log4j-over-slf4j + + + + + org.apache.hadoop + hadoop-client-api + ${hadoop.version} + + + org.apache.hadoop + hadoop-client-runtime + ${hadoop.version} + + + com.google.guava + guava + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + false + 1 + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + maven-clean-plugin + + + cleanup + clean + + clean + + + true + + + ./build/ + + + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + org.apache.maven.plugins + maven-dependency-plugin + 3.9.0 + + + copy-dependencies + package + + copy-dependencies + + + false + false + true + runtime + + + + + + + + diff --git a/external/storm-hdfs-oci/src/main/java/org/apache/storm/container/oci/HdfsManifestToResourcesPlugin.java b/external/storm-hdfs-oci/src/main/java/org/apache/storm/container/oci/HdfsManifestToResourcesPlugin.java new file mode 100644 index 00000000000..0ad78032c22 --- /dev/null +++ b/external/storm-hdfs-oci/src/main/java/org/apache/storm/container/oci/HdfsManifestToResourcesPlugin.java @@ -0,0 +1,160 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.storm.container.oci; + +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.storm.DaemonConfig; +import org.apache.storm.utils.HadoopLoginUtil; +import org.apache.storm.utils.ObjectReader; + +public class HdfsManifestToResourcesPlugin implements OciManifestToResourcesPluginInterface { + + private String layersDir; + private String configDir; + private FileSystem fs; + private LoadingCache statCache; + + private static final String CONFIG_MEDIA_TYPE = "application/vnd.docker.container.image.v1+json"; + + private static final String LAYER_TAR_GZIP_MEDIA_TYPE = "application/vnd.docker.image.rootfs.diff.tar.gzip"; + + private static final String SHA_256 = "sha256"; + + private static final String CONFIG_HASH_ALGORITHM = SHA_256; + + private static final String LAYER_HASH_ALGORITHM = SHA_256; + + private static final int SHA256_HASH_LENGTH = 64; + + private static final String ALPHA_NUMERIC = "[a-zA-Z0-9]+"; + + @Override + public void init(Map conf) throws IOException { + + //login to hdfs + HadoopLoginUtil.loginHadoop(conf); + + String topLevelDir = ObjectReader.getString(conf.get(DaemonConfig.STORM_OCI_IMAGE_HDFS_TOPLEVEL_DIR)); + + this.layersDir = topLevelDir + "/layers/"; + this.configDir = topLevelDir + "/config/"; + + this.fs = new Path(topLevelDir).getFileSystem(new Configuration()); + + CacheLoader cacheLoader = + new CacheLoader() { + @Override + public FileStatus load(Path path) throws Exception { + return statBlob(path); + } + }; + this.statCache = CacheBuilder.newBuilder().maximumSize(30) + .refreshAfterWrite(60, TimeUnit.MINUTES).build(cacheLoader); + } + + @Override + public List getLayerResources(ImageManifest manifest) throws IOException { + List ociResources = new ArrayList<>(); + for (ImageManifest.Blob blob : manifest.getLayers()) { + String mediaType = blob.getMediaType(); + if (!mediaType.equals(LAYER_TAR_GZIP_MEDIA_TYPE)) { + throw new IOException("Invalid layer mediaType: " + mediaType); + } + + String[] layerDigest = blob.getDigest().split(":", 2); + String algorithm = layerDigest[0]; + if (!algorithm.equals(LAYER_HASH_ALGORITHM)) { + throw new IOException("Invalid layer digest algorithm: " + algorithm); + } + + String hash = layerDigest[1]; + if (!hash.matches(ALPHA_NUMERIC) || hash.length() != SHA256_HASH_LENGTH) { + throw new IOException("Malformed layer digest: " + hash); + } + + long size = blob.getSize(); + String fileName = hash + ".sqsh"; + Path path = new Path(layersDir, fileName); + + try { + FileStatus stat = statCache.get(path); + long timestamp = stat.getModificationTime(); + + OciResource ociResource = new OciResource(path.toString(), fileName, size, timestamp, OciResource.OciResourceType.LAYER); + ociResources.add(ociResource); + } catch (ExecutionException e) { + throw new IOException(e); + } + } + return ociResources; + } + + @Override + public OciResource getConfigResource(ImageManifest manifest) throws IOException { + ImageManifest.Blob config = manifest.getConfig(); + + String mediaType = config.getMediaType(); + if (!mediaType.equals(CONFIG_MEDIA_TYPE)) { + throw new 
IOException("Invalid config mediaType: " + mediaType); + } + + String[] configDigest = config.getDigest().split(":", 2); + + String algorithm = configDigest[0]; + if (!algorithm.equals(CONFIG_HASH_ALGORITHM)) { + throw new IOException("Invalid config digest algorithm: " + algorithm); + } + + String hash = configDigest[1]; + if (!hash.matches(ALPHA_NUMERIC) || hash.length() != SHA256_HASH_LENGTH) { + throw new IOException("Malformed config digest: " + hash); + } + + long size = config.getSize(); + Path path = new Path(configDir + hash); + + OciResource ociResource; + + try { + FileStatus stat = statCache.get(path); + long timestamp = stat.getModificationTime(); + ociResource = new OciResource(path.toString(), hash, size, timestamp, OciResource.OciResourceType.CONFIG); + } catch (ExecutionException e) { + throw new IOException(e); + } + + return ociResource; + } + + private FileStatus statBlob(Path path) throws IOException { + return fs.getFileStatus(path); + } +} diff --git a/external/storm-hdfs-oci/src/main/java/org/apache/storm/container/oci/HdfsOciResourcesLocalizer.java b/external/storm-hdfs-oci/src/main/java/org/apache/storm/container/oci/HdfsOciResourcesLocalizer.java new file mode 100644 index 00000000000..ef749c25f7a --- /dev/null +++ b/external/storm-hdfs-oci/src/main/java/org/apache/storm/container/oci/HdfsOciResourcesLocalizer.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.container.oci; + +import java.io.File; +import java.io.IOException; +import java.util.Map; + +import org.apache.commons.io.FileDeleteStrategy; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.storm.DaemonConfig; +import org.apache.storm.utils.ConfigUtils; +import org.apache.storm.utils.HadoopLoginUtil; +import org.apache.storm.utils.ObjectReader; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class HdfsOciResourcesLocalizer implements OciResourcesLocalizerInterface { + private static final Logger LOG = LoggerFactory.getLogger(HdfsOciResourcesLocalizer.class); + private static final int LOCALIZE_MAX_RETRY = 5; + private String layersLocalDir; + private String configLocalDir; + private FileSystem fs; + + /** + * Initialization. + * @param conf the storm conf. 
+ * @throws IOException on I/O exception + */ + @Override + public void init(Map conf) throws IOException { + //login to hdfs + HadoopLoginUtil.loginHadoop(conf); + + String resourcesLocalDir = ObjectReader.getString(conf.get(DaemonConfig.STORM_OCI_RESOURCES_LOCAL_DIR), + ConfigUtils.supervisorLocalDir(conf) + "/oci-resources"); + FileUtils.forceMkdir(new File(resourcesLocalDir)); + this.layersLocalDir = resourcesLocalDir + "/layers/"; + this.configLocalDir = resourcesLocalDir + "/config/"; + String topLevelDir = ObjectReader.getString(conf.get(DaemonConfig.STORM_OCI_IMAGE_HDFS_TOPLEVEL_DIR)); + this.fs = new Path(topLevelDir).getFileSystem(new Configuration()); + } + + /** + * Download the resources from HDFS to local dir. + * @param ociResource The oci resource to download + * @return the destination of the oci resource + * @throws IOException on I/O exception + */ + @Override + public synchronized String localize(OciResource ociResource) throws IOException { + if (ociResource == null) { + return null; + } + File dst; + switch (ociResource.getType()) { + case CONFIG: + dst = new File(this.configLocalDir, ociResource.getFileName()); + break; + case LAYER: + dst = new File(layersLocalDir, ociResource.getFileName()); + break; + default: + throw new IOException("unknown OciResourceType " + ociResource.getType()); + } + + if (dst.exists()) { + LOG.info("{} already exists. Skip", dst); + } else { + // create working dir, copy file here, and set readable, then move to final location. + // this allows the operation to be atomic in case the supervisor dies. + File workingDir = new File(dst.getParent() + "/working"); + if (!workingDir.exists()) { + boolean dirCreated = workingDir.mkdirs(); + if (!dirCreated) { + throw new IOException("Couldn't create the directory: " + workingDir); + } + } + + File workingDst = new File(workingDir.getPath() + "/" + dst.getName()); + + LOG.info("Starting to copy {} from hdfs to {}", ociResource.getPath(), workingDst); + copyFileLocallyWithRetry(ociResource, workingDst); + LOG.info("Successfully finished copying {} from hdfs to {}", ociResource.getPath(), workingDst); + + //set to readable by anyone + boolean setReadable = workingDst.setReadable(true, false); + if (!setReadable) { + throw new IOException("Couldn't set " + workingDst + " to be world-readable"); + } + + boolean fileRenamed = workingDst.renameTo(dst); + if (!fileRenamed) { + throw new IOException("Couldn't move " + workingDst + " to " + dst); + } + } + return dst.toString(); + } + + private synchronized void copyFileLocallyWithRetry(OciResource ociResource, File dst) throws IOException { + IOException lastIoException = null; + + for (int retryCount = 0; retryCount < LOCALIZE_MAX_RETRY; retryCount++) { + try { + fs.copyToLocalFile(new Path(ociResource.getPath()), new Path(dst.toString())); + lastIoException = null; + break; + } catch (IOException e) { + if (dst.exists()) { + FileDeleteStrategy.FORCE.delete(dst); + } + LOG.warn("{} occurred at attempt {}, deleted corrupt file {} if present", e.toString(), retryCount, dst); + lastIoException = e; + try { + Thread.sleep(1500); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + throw new IOException("Failed to copy " + ociResource + " to " + dst, ie); + } + } + } + if (lastIoException != null) { + LOG.error("Resource localization of {} to {} failed after {} retries", ociResource, dst, LOCALIZE_MAX_RETRY, lastIoException); + throw lastIoException; + } + + } +} diff --git 
a/external/storm-hdfs-oci/src/main/java/org/apache/storm/container/oci/LocalOrHdfsImageTagToManifestPlugin.java b/external/storm-hdfs-oci/src/main/java/org/apache/storm/container/oci/LocalOrHdfsImageTagToManifestPlugin.java new file mode 100644 index 00000000000..949f0e1a7c0 --- /dev/null +++ b/external/storm-hdfs-oci/src/main/java/org/apache/storm/container/oci/LocalOrHdfsImageTagToManifestPlugin.java @@ -0,0 +1,297 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.container.oci; + +import com.fasterxml.jackson.databind.ObjectMapper; +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Map; +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.storm.DaemonConfig; +import org.apache.storm.utils.HadoopLoginUtil; +import org.apache.storm.utils.ObjectReader; +import org.apache.storm.utils.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class LocalOrHdfsImageTagToManifestPlugin implements OciImageTagToManifestPluginInterface { + private static final Logger LOG = LoggerFactory.getLogger(LocalOrHdfsImageTagToManifestPlugin.class); + + private Map manifestCache; + private ObjectMapper objMapper; + private Map localImageToHashCache = new HashMap<>(); + private Map hdfsImageToHashCache = new HashMap<>(); + private Map conf; + private long hdfsModTime; + private long localModTime; + private String hdfsImageToHashFile; + private String manifestDir; + private String localImageTagToHashFile; + private int ociCacheRefreshIntervalSecs; + private long lastRefreshTime; + + private static final String LOCAL_OR_HDFS_IMAGE_TAG_TO_MANIFEST_PLUGIN_PREFIX = "storm.oci.local.or.hdfs.image.tag.to.manifest.plugin."; + + /** + * The HDFS location where the oci image-tag-to-hash file exists. + */ + private static final String HDFS_OCI_IMAGE_TAG_TO_HASH_FILE = + LOCAL_OR_HDFS_IMAGE_TAG_TO_MANIFEST_PLUGIN_PREFIX + "hdfs.hash.file"; + + /** + * The local file system location where the oci image-tag-to-hash file exists. + */ + private static final String LOCAL_OCI_IMAGE_TAG_TO_HASH_FILE = + LOCAL_OR_HDFS_IMAGE_TAG_TO_MANIFEST_PLUGIN_PREFIX + "local.hash.file"; + + /** + * The interval in seconds between refreshing the oci image-Tag-to-hash cache. + */ + private static final String OCI_CACHE_REFRESH_INTERVAL = + LOCAL_OR_HDFS_IMAGE_TAG_TO_MANIFEST_PLUGIN_PREFIX + "cache.refresh.interval.secs"; + + /** + * The number of manifests to cache. 
+ */ + private static final String OCI_NUM_MANIFESTS_TO_CACHE = LOCAL_OR_HDFS_IMAGE_TAG_TO_MANIFEST_PLUGIN_PREFIX + "num.manifests.to.cache"; + + private static final int SHA256_HASH_LENGTH = 64; + + private static final String ALPHA_NUMERIC = "[a-zA-Z0-9]+"; + + @Override + public void init(Map conf) throws IOException { + this.conf = conf; + + //login to hdfs + HadoopLoginUtil.loginHadoop(conf); + + localImageTagToHashFile = (String) conf.get(LOCAL_OCI_IMAGE_TAG_TO_HASH_FILE); + if (localImageTagToHashFile == null) { + LOG.debug("Failed to load local oci-image-to-hash file. Config not set"); + } + hdfsImageToHashFile = (String) conf.get(HDFS_OCI_IMAGE_TAG_TO_HASH_FILE); + if (hdfsImageToHashFile == null) { + LOG.debug("Failed to load HDFS oci-image-to-hash file. Config not set"); + } + if (hdfsImageToHashFile == null && localImageTagToHashFile == null) { + throw new IllegalArgumentException("No valid image-tag-to-hash files"); + } + manifestDir = ObjectReader.getString(conf.get(DaemonConfig.STORM_OCI_IMAGE_HDFS_TOPLEVEL_DIR)) + "/manifests/"; + int numManifestsToCache = ObjectReader.getInt(conf.get(OCI_NUM_MANIFESTS_TO_CACHE), 10); + this.objMapper = new ObjectMapper(); + this.manifestCache = new LruCache(numManifestsToCache, 0.75f); + ociCacheRefreshIntervalSecs = ObjectReader.getInt(conf.get(OCI_CACHE_REFRESH_INTERVAL), 60); + } + + private boolean loadImageToHashFiles() throws IOException { + boolean ret = false; + try (BufferedReader localBr = getLocalImageToHashReader()) { + Map localImageToHash = readImageToHashFile(localBr, localImageTagToHashFile); + if (localImageToHash != null && !localImageToHash.equals(localImageToHashCache)) { + localImageToHashCache = localImageToHash; + LOG.info("Reloaded local image tag to hash cache"); + ret = true; + } + } + + try (BufferedReader hdfsBr = getHdfsImageToHashReader()) { + Map hdfsImageToHash = readImageToHashFile(hdfsBr, hdfsImageToHashFile); + if (hdfsImageToHash != null && !hdfsImageToHash.equals(hdfsImageToHashCache)) { + hdfsImageToHashCache = hdfsImageToHash; + LOG.info("Reloaded hdfs image tag to hash cache"); + ret = true; + } + } + return ret; + } + + private BufferedReader getLocalImageToHashReader() throws IOException { + if (localImageTagToHashFile == null) { + LOG.debug("Did not load local image to hash file, file is null"); + return null; + } + + File imageTagToHashFile = new File(localImageTagToHashFile); + if (!imageTagToHashFile.exists()) { + LOG.warn("Did not load local image to hash file, file doesn't exist"); + return null; + } + + long newLocalModTime = imageTagToHashFile.lastModified(); + if (newLocalModTime == localModTime) { + LOG.debug("Did not load local image to hash file, file is unmodified"); + return null; + } + localModTime = newLocalModTime; + + return new BufferedReader(new FileReader(imageTagToHashFile)); + } + + private BufferedReader getHdfsImageToHashReader() throws IOException { + if (hdfsImageToHashFile == null) { + LOG.debug("Did not load hdfs image to hash file, file is null"); + return null; + } + + Path imageToHash = new Path(hdfsImageToHashFile); + FileSystem fs = imageToHash.getFileSystem(new Configuration()); + if (!fs.exists(imageToHash)) { + String message = "Could not load hdfs image to hash file, " + hdfsImageToHashFile + " doesn't exist"; + LOG.error(message); + throw new IOException(message); + } + + long newHdfsModTime = fs.getFileStatus(imageToHash).getModificationTime(); + if (newHdfsModTime == hdfsModTime) { + LOG.debug("Did not load hdfs image to hash file, file is unmodified"); + 
return null; + } + hdfsModTime = newHdfsModTime; + + return new BufferedReader(new InputStreamReader(fs.open(imageToHash))); + } + + /** + * Read the image-tag-to-hash file and parse as a Map. + * + *
<p>
You may specify multiple tags per hash all on the same line. + * Comments are allowed using #. Anything after this character will not be read + * Example file: + * foo/bar:current,fizz/gig:latest:123456789 + * #this/line:wont,be:parsed:2378590895 + * + *
<p>
This will map both foo/bar:current and fizz/gig:latest to 123456789 + */ + private static Map readImageToHashFile(BufferedReader br, String filePath) throws IOException { + if (br == null) { + return null; + } + + String line; + Map imageToHashCache = new HashMap<>(); + while ((line = br.readLine()) != null) { + int index; + index = line.indexOf("#"); + if (index == 0) { + continue; + } else if (index != -1) { + line = line.substring(0, index); + } + + index = line.lastIndexOf(":"); + if (index == -1) { + LOG.warn("Malformed imageTagToManifest entry: {} in file: {}", line, filePath); + continue; + } + String imageTags = line.substring(0, index); + String[] imageTagArray = imageTags.split(","); + String hash = line.substring(index + 1); + + if (!hash.matches(ALPHA_NUMERIC) || hash.length() != SHA256_HASH_LENGTH) { + LOG.warn("Malformed image hash: " + hash); + continue; + } + + for (String imageTag : imageTagArray) { + imageToHashCache.put(imageTag, hash); + } + } + return imageToHashCache; + } + + + @Override + public synchronized ImageManifest getManifestFromImageTag(String imageTag) throws IOException { + String hash = getHashFromImageTag(imageTag); + ImageManifest manifest = manifestCache.get(hash); + if (manifest != null) { + return manifest; + } + Path manifestPath = new Path(manifestDir + hash); + FileSystem fs = manifestPath.getFileSystem(new Configuration()); + FSDataInputStream input; + try { + input = fs.open(manifestPath); + } catch (IllegalArgumentException iae) { + throw new IOException("Manifest file is not a valid HDFS file: " + + manifestPath.toString(), iae); + } + + byte[] bytes = IOUtils.toByteArray(input); + manifest = objMapper.readValue(bytes, ImageManifest.class); + + manifestCache.put(hash, manifest); + return manifest; + } + + @Override + public synchronized String getHashFromImageTag(String imageTag) { + String hash; + + long currentTime = System.currentTimeMillis(); + if (currentTime - lastRefreshTime > Time.secsToMillis(ociCacheRefreshIntervalSecs)) { + LOG.debug("Refreshing local and hdfs image-tag-to-hash cache"); + try { + boolean loaded = loadImageToHashFiles(); + //If this is the first time trying to load the files and yet it failed + if (!loaded && lastRefreshTime == 0) { + throw new RuntimeException("Couldn't load any image-tag-to-hash-files"); + } + lastRefreshTime = currentTime; + } catch (IOException e) { + throw new RuntimeException("Couldn't load any image-tag-to-hash-files", e); + } + } + + // 1) Go to local file + // 2) Go to HDFS + // 3) Use tag as is/Assume tag is the hash + if ((hash = localImageToHashCache.get(imageTag)) != null) { + return hash; + } else if ((hash = hdfsImageToHashCache.get(imageTag)) != null) { + return hash; + } else { + return imageTag; + } + } + + private static class LruCache extends LinkedHashMap { + private int cacheSize; + + LruCache(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor, true); + this.cacheSize = initialCapacity; + } + + @Override + protected boolean removeEldestEntry(Map.Entry eldest) { + return this.size() > cacheSize; + } + } +} diff --git a/external/storm-hdfs/README.md b/external/storm-hdfs/README.md new file mode 100644 index 00000000000..9eb8b6dfc83 --- /dev/null +++ b/external/storm-hdfs/README.md @@ -0,0 +1,605 @@ +# Storm HDFS + +Storm components for interacting with HDFS file systems + - [HDFS Bolt](#hdfs-bolt) + - [HDFS Spout](#hdfs-spout) + +--- + +# HDFS Bolt +## Usage +The following example will write pipe("|")-delimited files to the HDFS path 
hdfs://localhost:54310/foo. After every
+1,000 tuples it will sync the filesystem, making that data visible to other HDFS clients. It will rotate files when they
+reach 5 megabytes in size.
+
+```java
+// use "|" instead of "," for field delimiter
+RecordFormat format = new DelimitedRecordFormat()
+        .withFieldDelimiter("|");
+
+// sync the filesystem after every 1k tuples
+SyncPolicy syncPolicy = new CountSyncPolicy(1000);
+
+// rotate files when they reach 5MB
+FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
+
+FileNameFormat fileNameFormat = new DefaultFileNameFormat()
+        .withPath("/foo/");
+
+HdfsBolt bolt = new HdfsBolt()
+        .withFsUrl("hdfs://localhost:54310")
+        .withFileNameFormat(fileNameFormat)
+        .withRecordFormat(format)
+        .withRotationPolicy(rotationPolicy)
+        .withSyncPolicy(syncPolicy);
+```
+
+
+### Packaging a Topology
+When packaging your topology, it's important that you use the [maven-shade-plugin]() as opposed to the
+[maven-assembly-plugin]().
+
+The shade plugin provides facilities for merging JAR manifest entries, which the hadoop client leverages for URL scheme
+resolution.
+
+If you experience errors such as the following:
+
+```
+java.lang.RuntimeException: Error preparing HdfsBolt: No FileSystem for scheme: hdfs
+```
+
+it's an indication that your topology jar file isn't packaged properly.
+
+If you are using maven to create your topology jar, you should use the following `maven-shade-plugin` configuration to
+create your topology jar:
+
+```xml
+<plugin>
+    <groupId>org.apache.maven.plugins</groupId>
+    <artifactId>maven-shade-plugin</artifactId>
+    <version>1.4</version>
+    <configuration>
+        <createDependencyReducedPom>true</createDependencyReducedPom>
+    </configuration>
+    <executions>
+        <execution>
+            <phase>package</phase>
+            <goals>
+                <goal>shade</goal>
+            </goals>
+            <configuration>
+                <transformers>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                        <mainClass></mainClass>
+                    </transformer>
+                </transformers>
+            </configuration>
+        </execution>
+    </executions>
+</plugin>
+```
+
+### Specifying a Hadoop Version
+By default, storm-hdfs uses the following Hadoop dependencies:
+
+```xml
+<dependency>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-client</artifactId>
+    <version>2.6.1</version>
+    <exclusions>
+        <exclusion>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+    </exclusions>
+</dependency>
+<dependency>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-hdfs</artifactId>
+    <version>2.6.1</version>
+    <exclusions>
+        <exclusion>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+    </exclusions>
+</dependency>
+```
+
+If you are using a different version of Hadoop, you should exclude the Hadoop libraries from the storm-hdfs dependency
+and add the dependencies for your preferred version in your pom.
+
+Hadoop client version incompatibilities can manifest as errors like:
+
+```
+com.google.protobuf.InvalidProtocolBufferException: Protocol message contained an invalid tag (zero)
+```
+
+## HDFS Bolt Customization
+
+### Record Formats
+Record format can be controlled by providing an implementation of the `org.apache.storm.hdfs.format.RecordFormat`
+interface:
+
+```java
+public interface RecordFormat extends Serializable {
+    byte[] format(Tuple tuple);
+}
+```
+
+The provided `org.apache.storm.hdfs.format.DelimitedRecordFormat` is capable of producing formats such as CSV and
+tab-delimited files.
+
+
+### File Naming
+File naming can be controlled by providing an implementation of the `org.apache.storm.hdfs.format.FileNameFormat`
+interface:
+
+```java
+public interface FileNameFormat extends Serializable {
+    void prepare(Map conf, TopologyContext topologyContext);
+    String getName(long rotation, long timeStamp);
+    String getPath();
+}
+```
+
+The provided `org.apache.storm.hdfs.format.DefaultFileNameFormat` will create file names with the following format:
+
+     {prefix}{componentId}-{taskId}-{rotationNum}-{timestamp}{extension}
+
+For example:
+
+     MyBolt-5-7-1390579837830.txt
+
+By default, prefix is empty and extension is ".txt".
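+
+A minimal sketch of overriding them (the prefix and extension values here are illustrative, not defaults):
+
+```java
+// produces names like data-MyBolt-5-7-1390579837830.log
+FileNameFormat namedFileNameFormat = new DefaultFileNameFormat()
+        .withPath("/foo/")
+        .withPrefix("data-")
+        .withExtension(".log");
+```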
+
+**New FileNameFormat:**
+
+The newer `org.apache.storm.hdfs.format.SimpleFileNameFormat` and `org.apache.storm.hdfs.trident.format.SimpleFileNameFormat`
+are more flexible, and the `withName` method supports the following parameters:
+
+* $TIME - current time; use `withTimeFormat` to format it
+* $NUM - rotation number
+* $HOST - local host name
+* $PARTITION - partition index (`org.apache.storm.hdfs.trident.format.SimpleFileNameFormat` only)
+* $COMPONENT - component id (`org.apache.storm.hdfs.format.SimpleFileNameFormat` only)
+* $TASK - task id (`org.apache.storm.hdfs.format.SimpleFileNameFormat` only)
+
+e.g.: `seq.$TIME.$HOST.$COMPONENT.$NUM.dat`
+
+The default file `name` is `$TIME.$NUM.txt`, and the default `timeFormat` is `yyyyMMddHHmmss`.
+
+
+### Sync Policies
+Sync policies allow you to control when buffered data is flushed to the underlying filesystem (thus making it available
+to clients reading the data) by implementing the `org.apache.storm.hdfs.sync.SyncPolicy` interface:
+
+```java
+public interface SyncPolicy extends Serializable {
+    boolean mark(Tuple tuple, long offset);
+    void reset();
+}
+```
+The `HdfsBolt` will call the `mark()` method for every tuple it processes. Returning `true` will trigger the `HdfsBolt`
+to perform a sync/flush, after which it will call the `reset()` method.
+
+The `org.apache.storm.hdfs.sync.CountSyncPolicy` class simply triggers a sync after the specified number of tuples have
+been processed.
+
+### File Rotation Policies
+Similar to sync policies, file rotation policies allow you to control when data files are rotated by providing an
+implementation of the `org.apache.storm.hdfs.rotation.FileRotationPolicy` interface:
+
+```java
+public interface FileRotationPolicy extends Serializable {
+    boolean mark(Tuple tuple, long offset);
+    void reset();
+    FileRotationPolicy copy();
+}
+```
+
+The `org.apache.storm.hdfs.rotation.FileSizeRotationPolicy` implementation allows you to trigger file rotation when
+data files reach a specific file size:
+
+```java
+FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
+```
+
+### File Rotation Actions
+Both the HDFS bolt and Trident State implementation allow you to register any number of `RotationAction`s.
+`RotationAction`s provide a hook that lets you perform some action right after a file is rotated, for
+example moving the file to a different location or renaming it.
+
+```java
+public interface RotationAction extends Serializable {
+    void execute(FileSystem fileSystem, Path filePath) throws IOException;
+}
+```
+
+Storm-HDFS includes a simple action that will move a file after rotation:
+
+```java
+public class MoveFileAction implements RotationAction {
+    private static final Logger LOG = LoggerFactory.getLogger(MoveFileAction.class);
+
+    private String destination;
+
+    public MoveFileAction withDestination(String destDir) {
+        destination = destDir;
+        return this;
+    }
+
+    @Override
+    public void execute(FileSystem fileSystem, Path filePath) throws IOException {
+        Path destPath = new Path(destination, filePath.getName());
+        LOG.info("Moving file {} to {}", filePath, destPath);
+        boolean success = fileSystem.rename(filePath, destPath);
+        if (!success) {
+            LOG.warn("Failed to move {} to {}", filePath, destPath);
+        }
+    }
+}
+```
+
+If you are using Trident and sequence files you can do something like this:
+
+```java
+        HdfsState.Options seqOpts = new HdfsState.SequenceFileOptions()
+                .withFileNameFormat(fileNameFormat)
+                .withSequenceFormat(new DefaultSequenceFormat("key", "data"))
+                .withRotationPolicy(rotationPolicy)
+                .withFsUrl("hdfs://localhost:54310")
+                .addRotationAction(new MoveFileAction().withDestination("/dest2/"));
+```
+
+### Data Partitioning
+Data can be partitioned to different HDFS directories based on characteristics of the tuple being processed or purely
+external factors, such as system time. To partition your data, write a class that implements the `Partitioner`
+interface and pass it to the `withPartitioner()` method of your bolt. The `getPartitionPath()` method returns a partition
+path for a given tuple.
+
+Here's an example of a Partitioner that operates on a specific field of data:
+
+```java
+    Partitioner partitioner = new Partitioner() {
+        @Override
+        public String getPartitionPath(Tuple tuple) {
+            return Path.SEPARATOR + tuple.getStringByField("city");
+        }
+    };
+```
+
+## HDFS Bolt Support for HDFS Sequence Files
+
+The `org.apache.storm.hdfs.bolt.SequenceFileBolt` class allows you to write Storm data to HDFS sequence files:
+
+```java
+        // sync the filesystem after every 1k tuples
+        SyncPolicy syncPolicy = new CountSyncPolicy(1000);
+
+        // rotate files when they reach 5MB
+        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
+
+        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
+                .withExtension(".seq")
+                .withPath("/data/");
+
+        // create sequence format instance.
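+        // the first constructor argument is the tuple field used as the sequence file key,
+        // the second is the field used as the value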
+        DefaultSequenceFormat format = new DefaultSequenceFormat("timestamp", "sentence");
+
+        SequenceFileBolt bolt = new SequenceFileBolt()
+                .withFsUrl("hdfs://localhost:54310")
+                .withFileNameFormat(fileNameFormat)
+                .withSequenceFormat(format)
+                .withRotationPolicy(rotationPolicy)
+                .withSyncPolicy(syncPolicy)
+                .withCompressionType(SequenceFile.CompressionType.RECORD)
+                .withCompressionCodec("deflate");
+```
+
+The `SequenceFileBolt` requires that you provide an `org.apache.storm.hdfs.bolt.format.SequenceFormat` that maps tuples to
+key/value pairs:
+
+```java
+public interface SequenceFormat extends Serializable {
+    Class keyClass();
+    Class valueClass();
+
+    Writable key(Tuple tuple);
+    Writable value(Tuple tuple);
+}
+```
+
+## HDFS Bolt Support for Avro Files
+
+The `org.apache.storm.hdfs.bolt.AvroGenericRecordBolt` class allows you to write Avro objects directly to HDFS:
+
+```java
+        // sync the filesystem after every 1k tuples
+        SyncPolicy syncPolicy = new CountSyncPolicy(1000);
+
+        // rotate files when they reach 5MB
+        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
+
+        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
+                .withExtension(".avro")
+                .withPath("/data/");
+
+        AvroGenericRecordBolt bolt = new AvroGenericRecordBolt()
+                .withFsUrl("hdfs://localhost:54310")
+                .withFileNameFormat(fileNameFormat)
+                .withRotationPolicy(rotationPolicy)
+                .withSyncPolicy(syncPolicy);
+```
+
+The Avro bolt will write records to separate files based on the schema of the record being processed. In other words,
+if the bolt receives records with two different schemas, it will write to two separate files. Each file will be rotated
+in accordance with the specified rotation policy. If a large number of Avro schemas are expected, the bolt should
+be configured with a maximum number of open files at least equal to the number of schemas expected, to prevent excessive
+file open/close/create operations.
+
+To use this bolt you **must** register the appropriate Kryo serializers with your topology configuration. A convenience
+method is provided for this:
+
+`AvroUtils.addAvroKryoSerializations(conf);`
+
+By default Storm will use the `GenericAvroSerializer` to handle serialization. This will work, but there are much
+faster options available if you can pre-define the schemas you will be using or utilize an external schema registry.
+
+Please see the javadoc for classes in org.apache.storm.hdfs.avro for information about using the built-in options or
+creating your own.
+
+
+## HDFS Bolt support for Trident API
+storm-hdfs also includes a Trident `state` implementation for writing data to HDFS, with an API that closely mirrors
+that of the bolts.
+
+ ```java
+ Fields hdfsFields = new Fields("field1", "field2");
+
+ FileNameFormat fileNameFormat = new DefaultFileNameFormat()
+         .withPath("/trident")
+         .withPrefix("trident")
+         .withExtension(".txt");
+
+ RecordFormat recordFormat = new DelimitedRecordFormat()
+         .withFields(hdfsFields);
+
+ FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);
+
+ HdfsState.Options options = new HdfsState.HdfsFileOptions()
+         .withFileNameFormat(fileNameFormat)
+         .withRecordFormat(recordFormat)
+         .withRotationPolicy(rotationPolicy)
+         .withFsUrl("hdfs://localhost:54310");
+
+ StateFactory factory = new HdfsStateFactory().withOptions(options);
+
+ TridentState state = stream
+         .partitionPersist(factory, hdfsFields, new HdfsUpdater(), new Fields());
+ ```
+
+ To use the sequence file `State` implementation, use the `HdfsState.SequenceFileOptions`:
+
+ ```java
+ HdfsState.Options seqOpts = new HdfsState.SequenceFileOptions()
+         .withFileNameFormat(fileNameFormat)
+         .withSequenceFormat(new DefaultSequenceFormat("key", "data"))
+         .withRotationPolicy(rotationPolicy)
+         .withFsUrl("hdfs://localhost:54310")
+         .addRotationAction(new MoveFileAction().withDestination("/dest2/"));
+```
+
+### Note
+Whenever a batch is replayed by Storm (due to failures), the trident state implementation automatically removes
+duplicates from the current data file by copying the data up to the last transaction to another file. Since this
+operation involves a lot of data copying, ensure that the data files are rotated at reasonable sizes with `FileSizeRotationPolicy`
+and at reasonable intervals with `TimedRotationPolicy` so that the recovery can complete within `topology.message.timeout.secs`.
+
+Also note that with `TimedRotationPolicy` the files are never rotated in the middle of a batch even if the timer ticks,
+but only when a batch completes, so that complete batches can be efficiently recovered in case of failures.
+
+## Working with Secure HDFS
+If your topology is going to interact with secure HDFS, your bolts/states need to be authenticated by the NameNode. We
+currently have two options to support this:
+
+### Using HDFS delegation tokens
+Your administrator can configure nimbus to automatically get delegation tokens on behalf of the topology submitter user. Nimbus should be started with the following configuration:
+
+```
+nimbus.autocredential.plugins.classes : ["org.apache.storm.hdfs.security.AutoHDFS"]
+nimbus.credential.renewers.classes : ["org.apache.storm.hdfs.security.AutoHDFS"]
+hdfs.keytab.file: "/path/to/keytab/on/nimbus" (This is the keytab of the hdfs super user that can impersonate other users.)
+hdfs.kerberos.principal: "superuser@EXAMPLE.com"
+nimbus.credential.renewers.freq.secs : 82800 (23 hours; hdfs tokens need to be renewed every 24 hours, so this value should be less than 24 hours.)
+topology.hdfs.uri:"hdfs://host:port" (This is an optional config; by default we will use the value of the "fs.defaultFS" property specified in hadoop's core-site.xml)
+```
+
+Your topology configuration should have:
+
+```
+topology.auto-credentials :["org.apache.storm.hdfs.security.AutoHDFS"]
+```
+
+If nimbus does not already have the above configuration, you need to add it and then restart nimbus. Ensure the hadoop configuration
+files (core-site.xml and hdfs-site.xml) and the storm-hdfs jar with all the dependencies are present in nimbus's classpath.
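+
+The topology-side setting can also be applied programmatically when building the topology config. A minimal sketch
+(assuming the same `AutoHDFS` plugin configuration as above):
+
+```java
+import java.util.Collections;
+
+import org.apache.storm.Config;
+
+Config conf = new Config();
+// equivalent to setting topology.auto-credentials in storm.yaml
+conf.put(Config.TOPOLOGY_AUTO_CREDENTIALS,
+        Collections.singletonList("org.apache.storm.hdfs.security.AutoHDFS"));
+```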
+
+As an alternative to adding the configuration files (core-site.xml and hdfs-site.xml) to the classpath, you could specify the configurations
+as a part of the topology configuration, e.g. in your custom storm.yaml (or via the -c option while submitting the topology):
+
+```
+hdfsCredentialsConfigKeys : ["cluster1", "cluster2"] (the hdfs clusters you want to fetch the tokens from)
+"cluster1": {"config1": "value1", "config2": "value2", ... } (A map of config key-values specific to cluster1)
+"cluster2": {"config1": "value1", "hdfs.keytab.file": "/path/to/keytab/for/cluster2/on/nimbus", "hdfs.kerberos.principal": "cluster2user@EXAMPLE.com"} (here along with other configs, we have a custom keytab and principal for "cluster2" which will override the keytab/principal specified at the topology level)
+```
+
+Instead of specifying key-value pairs, you may also directly specify the resource files, e.g.:
+
+```
+"cluster1": {"resources": ["/path/to/core-site1.xml", "/path/to/hdfs-site1.xml"]}
+"cluster2": {"resources": ["/path/to/core-site2.xml", "/path/to/hdfs-site2.xml"]}
+```
+
+Storm will download the tokens separately for each of the clusters, populate them into the subject, and also renew the tokens periodically. This way it is possible to run multiple bolts connecting to separate HDFS clusters within the same topology.
+
+Nimbus will use the keytab and principal specified in the config to authenticate with the Namenode. From then on, for every
+topology submission, nimbus will impersonate the topology submitter user and acquire delegation tokens on behalf of the
+topology submitter user. If the topology was started with topology.auto-credentials set to AutoHDFS, nimbus will push the
+delegation tokens to all the workers for your topology and the hdfs bolt/state will authenticate with the namenode using
+these tokens.
+
+As nimbus is impersonating the topology submitter user, you need to ensure the user specified in hdfs.kerberos.principal
+has permissions to acquire tokens on behalf of other users. To achieve this you need to follow the configuration directions
+listed at
+http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/Superusers.html
+
+You can read about setting up secure HDFS here: http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/SecureMode.html.
+
+### Using keytabs on all worker hosts
+If you have distributed the keytab files for the hdfs user on all potential worker hosts then you can use this method. You should specify an
+hdfs config key using the method HdfsBolt/State.withConfigKey("somekey"), and the value map of this key should have the following two properties:
+
+hdfs.keytab.file: "/path/to/keytab/"
+hdfs.kerberos.principal: "user@EXAMPLE.com"
+
+On worker hosts the bolt/trident-state code will use the keytab file with the principal provided in the config to authenticate with the
+Namenode. This method is a little risky as you need to ensure all workers have the keytab file at the same location, and you need
+to remember this as you bring up new hosts in the cluster.
+
+---
+
+# HDFS Spout
+
+The HDFS spout is intended to allow feeding data into Storm from an HDFS directory.
+It will actively monitor the directory to consume any new files that appear in the directory.
+The HDFS spout does not currently support Trident.
+
+**Important**: The HDFS spout assumes that the files being made visible to it in the monitored directory
+are NOT actively being written to. Only after a file is completely written should it be made
+visible to the spout.
+This can be achieved by either writing the files out to another directory
+and, once completely written, moving them to the monitored directory. Alternatively, the file
+can be created with a '.ignore' suffix in the monitored directory and, after the data is completely
+written, renamed to drop the suffix. File names with a '.ignore' suffix are ignored
+by the spout.
+
+When the spout is actively consuming a file, it renames the file with a '.inprogress' suffix.
+After consuming all the contents in the file, the file will be moved to a configurable *done*
+directory and the '.inprogress' suffix will be dropped.
+
+**Concurrency** If multiple spout instances are used in the topology, each instance will consume
+a different file. Synchronization among spout instances is done using lock files created in a
+(by default) '.lock' subdirectory under the monitored directory. A file with the same name
+as the file being consumed (without the in progress suffix) is created in the lock directory.
+Once the file is completely consumed, the corresponding lock file is deleted.
+
+**Recovery from failure**
+Periodically, the spout also records, in the lock file, progress information about how much of the file has been
+consumed. In case of a crash of the spout instance (or a force kill of the topology),
+another spout can take over the file and resume from the location recorded in the lock file.
+
+Certain error conditions (such as the spout crashing) can leave behind lock files without deleting them.
+Such a stale lock file also indicates that the corresponding input file has not been completely
+processed. When detected, ownership of such stale lock files will be transferred to another spout.
+The configuration 'hdfsspout.lock.timeout.sec' is used to specify the duration of inactivity after
+which lock files should be considered stale. For lock file ownership transfer to succeed, the HDFS
+lease on the file (from the previous lock owner) should have expired. Spouts scan for stale lock files
+before selecting the next file for consumption.
+
+**Lock on *.lock* Directory**
+HDFS spout instances create a *DIRLOCK* file in the .lock directory to coordinate certain accesses to
+the .lock dir itself. A spout will try to create it when it needs access to the .lock directory and
+then delete it when done. In error conditions such as a topology crash, force kill, or untimely death
+of a spout, this file may not get deleted. Future running instances of the spout will eventually recover
+the lock once the DIRLOCK file becomes stale due to inactivity for hdfsspout.lock.timeout.sec seconds.
+
+## Usage
+
+The following example creates an HDFS spout that reads text files from the HDFS directory /data/in on hdfs://localhost:54310.
+
+```java
+// Instantiate spout to read text files
+HdfsSpout textReaderSpout = new HdfsSpout().setReaderType("text")
+        .withOutputFields(TextFileReader.defaultFields)
+        .setHdfsUri("hdfs://localhost:54310")  // required
+        .setSourceDir("/data/in")              // required
+        .setArchiveDir("/data/done")           // required
+        .setBadFilesDir("/data/badfiles");     // required
+
+// If using Kerberos
+HashMap<String, Object> hdfsSettings = new HashMap<>();
+hdfsSettings.put("hdfs.keytab.file", "/path/to/keytab");
+hdfsSettings.put("hdfs.kerberos.principal", "user@EXAMPLE.com");
+
+textReaderSpout.setHdfsClientSettings(hdfsSettings);
+
+// Create topology
+TopologyBuilder builder = new TopologyBuilder();
+builder.setSpout("hdfsspout", textReaderSpout, SPOUT_NUM);
+
+// Setup bolts and wire up topology
+..snip..
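+// For illustration only -- a hypothetical bolt consuming the spout's stream:
+//   builder.setBolt("parser", new MyParserBolt(), 4).shuffleGrouping("hdfsspout");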
+ +// Submit topology with config +Config conf = new Config(); +StormSubmitter.submitTopologyWithProgressBar("topologyName", conf, builder.createTopology()); +``` + +A sample topology HdfsSpoutTopology is provided in storm-starter module. + +## Configuration Settings +Below is a list of HdfsSpout member functions used for configuration. The equivalent config is also possible via Config object passed in during submitting topology. +However, the later mechanism is deprecated as it does not allow multiple Hdfs spouts with differing settings. : + + +Only methods mentioned in **bold** are required. + +| Method | Alternative config name (deprecated) | Default | Description | +|----------------------------|--------------------------------------|-------------|-------------| +| **.setReaderType()** |~~hdfsspout.reader.type~~ | | Determines which file reader to use. Set to 'seq' for reading sequence files or 'text' for text files. Set to a fully qualified class name if using a custom file reader class (that implements interface org.apache.storm.hdfs.spout.FileReader)| +| **.withOutputFields()** | | | Sets the names for the output fields for the spout. The number of fields depends upon the reader being used. For convenience, built-in reader types expose a static member called `defaultFields` that can be used for setting this.| +| **.setHdfsUri()** |~~hdfsspout.hdfs~~ | | HDFS URI for the hdfs Name node. Example: hdfs://namenodehost:8020| +| **.setSourceDir()** |~~hdfsspout.source.dir~~ | | HDFS directory from where to read files. E.g. /data/inputdir| +| **.setArchiveDir()** |~~hdfsspout.archive.dir~~ | | After a file is processed completely it will be moved to this HDFS directory. If this directory does not exist it will be created. E.g. /data/done| +| **.setBadFilesDir()** |~~hdfsspout.badfiles.dir~~ | | if there is an error parsing a file's contents, the file is moved to this location. If this directory does not exist it will be created. E.g. /data/badfiles | +| .setLockDir() |~~hdfsspout.lock.dir~~ | '.lock' subdirectory under hdfsspout.source.dir | Dir in which lock files will be created. Concurrent HDFS spout instances synchronize using *lock* files. Before processing a file the spout instance creates a lock file in this directory with same name as input file and deletes this lock file after processing the file. Spouts also periodically makes a note of their progress (wrt reading the input file) in the lock file so that another spout instance can resume progress on the same file if the spout dies for any reason.| +| .setIgnoreSuffix() |~~hdfsspout.ignore.suffix~~ | .ignore | File names with this suffix in the in the hdfsspout.source.dir location will not be processed| +| .setCommitFrequencyCount() |~~hdfsspout.commit.count~~ | 20000 | Record progress in the lock file after these many records are processed. If set to 0, this criterion will not be used. | +| .setCommitFrequencySec() |~~hdfsspout.commit.sec~~ | 10 | Record progress in the lock file after these many seconds have elapsed. 
+
+---
+
+# License
+
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+
+# Committer Sponsors
+
+ * P.
Taylor Goetz ([ptgoetz@apache.org](mailto:ptgoetz@apache.org)) + * Bobby Evans ([bobby@apache.org](mailto:bobby@apache.org)) diff --git a/external/storm-hdfs/pom.xml b/external/storm-hdfs/pom.xml new file mode 100644 index 00000000000..47c7f6ecf1f --- /dev/null +++ b/external/storm-hdfs/pom.xml @@ -0,0 +1,156 @@ + + + + 4.0.0 + + + storm + org.apache.storm + 2.8.4-SNAPSHOT + ../../pom.xml + + + storm-hdfs + + + + org.slf4j + slf4j-api + + + org.apache.storm + storm-client + ${project.version} + ${provided.scope} + + + + org.slf4j + log4j-over-slf4j + + + + + org.apache.storm + storm-client + ${project.version} + test-jar + test + + + + org.slf4j + log4j-over-slf4j + + + + + org.apache.storm + storm-autocreds + ${project.version} + + + org.apache.hadoop + hadoop-common + + + org.apache.hadoop + hadoop-hdfs + + + org.apache.hadoop + hadoop-auth + + + + + org.apache.hadoop + hadoop-client-api + ${hadoop.version} + + + org.apache.hadoop + hadoop-client-runtime + ${hadoop.version} + + + org.apache.avro + avro + + + com.google.guava + guava + + + org.mockito + mockito-core + test + + + org.hamcrest + hamcrest + test + + + org.apache.hadoop + hadoop-client-minicluster + ${hadoop.version} + test + + + org.eclipse.jetty.ee10 + jetty-ee10-servlet + ${jetty.version} + test + + + org.eclipse.jetty.ee10 + jetty-ee10-webapp + ${jetty.version} + test + + + org.mockito + mockito-junit-jupiter + test + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + false + 1 + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + + org.apache.maven.plugins + maven-pmd-plugin + + + + diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/avro/AbstractAvroSerializer.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/avro/AbstractAvroSerializer.java new file mode 100644 index 00000000000..04cef636714 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/avro/AbstractAvroSerializer.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */
+
+package org.apache.storm.hdfs.avro;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+import java.io.IOException;
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericContainer;
+import org.apache.avro.generic.GenericDatumReader;
+import org.apache.avro.generic.GenericDatumWriter;
+import org.apache.avro.io.BinaryEncoder;
+import org.apache.avro.io.Decoder;
+import org.apache.avro.io.DecoderFactory;
+import org.apache.avro.io.EncoderFactory;
+
+//Generously adapted from:
+//https://github.com/kijiproject/kiji-express/blob/master/kiji-express/src/main/scala/org/kiji/express/flow/framework/serialization
+// /AvroSerializer.scala
+//which is under an ASL 2.0 license
+
+/**
+ * This abstract class can be extended to implement concrete classes capable of (de)serializing generic avro objects
+ * across a Topology. The methods in the AvroSchemaRegistry interface specify how schemas can be mapped to unique
+ * identifiers and vice versa. Implementations based on pre-defining schemas or utilizing an external schema registry
+ * are provided.
+ */
+public abstract class AbstractAvroSerializer extends Serializer implements AvroSchemaRegistry {
+
+    @Override
+    public void write(Kryo kryo, Output output, GenericContainer record) {
+
+        String fingerPrint = this.getFingerprint(record.getSchema());
+        output.writeString(fingerPrint);
+        GenericDatumWriter writer = new GenericDatumWriter<>(record.getSchema());
+
+        BinaryEncoder encoder = EncoderFactory
+            .get()
+            .directBinaryEncoder(output, null);
+        try {
+            writer.write(record, encoder);
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    @Override
+    public GenericContainer read(Kryo kryo, Input input, Class someClass) {
+        Schema theSchema = this.getSchema(input.readString());
+        GenericDatumReader reader = new GenericDatumReader<>(theSchema);
+        Decoder decoder = DecoderFactory
+            .get()
+            .directBinaryDecoder(input, null);
+
+        GenericContainer foo;
+        try {
+            foo = reader.read(null, decoder);
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+        return foo;
+    }
+} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/avro/AvroSchemaRegistry.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/avro/AvroSchemaRegistry.java new file mode 100644 index 00000000000..cca5099df45 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/avro/AvroSchemaRegistry.java @@ -0,0 +1,22 @@ +/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.hdfs.avro;
+
+import java.io.Serializable;
+import org.apache.avro.Schema;
+
+public interface AvroSchemaRegistry extends Serializable {
+    String getFingerprint(Schema schema);
+
+    Schema getSchema(String fingerPrint);
+} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/avro/AvroUtils.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/avro/AvroUtils.java new file mode 100644 index 00000000000..13798bb24f5 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/avro/AvroUtils.java @@ -0,0 +1,38 @@ +/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.hdfs.avro;
+
+import org.apache.avro.generic.GenericData;
+import org.apache.storm.Config;
+
+public class AvroUtils {
+    /**
+     * A helper method to extract avro serialization configurations from the topology configuration and register
+     * specific kryo serializers as necessary. A default serializer will be provided if none is specified in the
+     * configuration. "avro.serializer" should specify the complete class name of the serializer, e.g.
+     * "org.apache.storm.hdfs.avro.GenericAvroSerializer"
+     *
+     * @param conf The topology configuration
+     * @throws ClassNotFoundException If the specified serializer cannot be located.
+     */
+    public static void addAvroKryoSerializations(Config conf) throws ClassNotFoundException {
+        final Class serializerClass;
+        if (conf.containsKey("avro.serializer")) {
+            serializerClass = Class.forName((String) conf.get("avro.serializer"));
+        } else {
+            serializerClass = GenericAvroSerializer.class;
+        }
+        conf.registerSerialization(GenericData.Record.class, serializerClass);
+        conf.setSkipMissingKryoRegistrations(false);
+    }
+} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/avro/FixedAvroSerializer.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/avro/FixedAvroSerializer.java new file mode 100644 index 00000000000..94607b37cb5 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/avro/FixedAvroSerializer.java @@ -0,0 +1,61 @@ +/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.avro; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.security.NoSuchAlgorithmException; +import java.util.HashMap; +import java.util.Map; +import org.apache.avro.Schema; +import org.apache.avro.SchemaNormalization; +import org.apache.commons.codec.binary.Base64; + +/** + * A class to help (de)serialize a pre-defined set of Avro schemas. Schemas should be listed, one per line, in a file + * called "FixedAvroSerializer.config", which must be part of the Storm topology jar file. Any schemas intended to be + * used with this class **MUST** be defined in that file. + */ +public class FixedAvroSerializer extends AbstractAvroSerializer { + + private static final String FP_ALGO = "CRC-64-AVRO"; + final Map fingerprint2schemaMap = new HashMap<>(); + final Map schema2fingerprintMap = new HashMap<>(); + + public FixedAvroSerializer() throws IOException, NoSuchAlgorithmException { + InputStream in = this.getClass().getClassLoader().getResourceAsStream("FixedAvroSerializer.config"); + BufferedReader reader = new BufferedReader(new InputStreamReader(in)); + + String line; + while ((line = reader.readLine()) != null) { + Schema schema = new Schema.Parser().parse(line); + byte[] fp = SchemaNormalization.parsingFingerprint(FP_ALGO, schema); + String fingerPrint = new String(Base64.decodeBase64(fp)); + + fingerprint2schemaMap.put(fingerPrint, schema); + schema2fingerprintMap.put(schema, fingerPrint); + } + } + + @Override + public String getFingerprint(Schema schema) { + return schema2fingerprintMap.get(schema); + } + + @Override + public Schema getSchema(String fingerPrint) { + return fingerprint2schemaMap.get(fingerPrint); + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/avro/GenericAvroSerializer.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/avro/GenericAvroSerializer.java new file mode 100644 index 00000000000..6bb0e26b411 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/avro/GenericAvroSerializer.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.avro; + +import org.apache.avro.Schema; + +/** + * A default implementation of the AvroSerializer that will just pass literal schemas back and forth. This should + * only be used if no other serializer will fit a use case. 
+ */ +public class GenericAvroSerializer extends AbstractAvroSerializer { + @Override + public String getFingerprint(Schema schema) { + return schema.toString(); + } + + @Override + public Schema getSchema(String fingerPrint) { + return new Schema.Parser().parse(fingerPrint); + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AbstractHdfsBolt.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AbstractHdfsBolt.java new file mode 100644 index 00000000000..ee820663350 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AbstractHdfsBolt.java @@ -0,0 +1,337 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.bolt; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Timer; +import java.util.TimerTask; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.storm.hdfs.bolt.format.FileNameFormat; +import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; +import org.apache.storm.hdfs.bolt.rotation.TimedRotationPolicy; +import org.apache.storm.hdfs.bolt.sync.SyncPolicy; +import org.apache.storm.hdfs.common.NullPartitioner; +import org.apache.storm.hdfs.common.Partitioner; +import org.apache.storm.hdfs.common.rotation.RotationAction; +import org.apache.storm.hdfs.security.HdfsSecurityUtil; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.OutputFieldsDeclarer; +import org.apache.storm.topology.base.BaseRichBolt; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.utils.TupleUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public abstract class AbstractHdfsBolt extends BaseRichBolt { + private static final Logger LOG = LoggerFactory.getLogger(AbstractHdfsBolt.class); + private static final Integer DEFAULT_RETRY_COUNT = 3; + /** + * Half of the default Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS + */ + private static final int DEFAULT_TICK_TUPLE_INTERVAL_SECS = 15; + private static final Integer DEFAULT_MAX_OPEN_FILES = 50; + + protected Map writers; + protected Map rotationCounterMap = new HashMap<>(); + protected List rotationActions = new ArrayList<>(); + protected OutputCollector collector; + protected transient FileSystem fs; + protected SyncPolicy syncPolicy; + protected FileRotationPolicy rotationPolicy; + protected FileNameFormat fileNameFormat; + protected String fsUrl; + protected String configKey; + protected transient Object writeLock; + protected transient Timer rotationTimer; // only used for 
TimedRotationPolicy + protected long offset = 0; + protected Integer fileRetryCount = DEFAULT_RETRY_COUNT; + protected Integer tickTupleInterval = DEFAULT_TICK_TUPLE_INTERVAL_SECS; + protected Integer maxOpenFiles = DEFAULT_MAX_OPEN_FILES; + protected Partitioner partitioner = new NullPartitioner(); + protected transient Configuration hdfsConfig; + private List tupleBatch = new LinkedList<>(); + + protected void rotateOutputFile(Writer writer) throws IOException { + LOG.info("Rotating output file..."); + long start = System.currentTimeMillis(); + synchronized (this.writeLock) { + writer.close(); + + LOG.info("Performing {} file rotation actions.", this.rotationActions.size()); + for (RotationAction action : this.rotationActions) { + action.execute(this.fs, writer.getFilePath()); + } + } + long time = System.currentTimeMillis() - start; + LOG.info("File rotation took {} ms.", time); + } + + /** + * Marked as final to prevent override. Subclasses should implement the doPrepare() method. + */ + @Override + public final void prepare(Map conf, TopologyContext topologyContext, OutputCollector collector) { + this.writeLock = new Object(); + if (this.syncPolicy == null) { + throw new IllegalStateException("SyncPolicy must be specified."); + } + if (this.rotationPolicy == null) { + throw new IllegalStateException("RotationPolicy must be specified."); + } + if (this.fsUrl == null) { + throw new IllegalStateException("File system URL must be specified."); + } + + writers = new WritersMap(this.maxOpenFiles, collector); + + this.collector = collector; + this.fileNameFormat.prepare(conf, topologyContext); + this.hdfsConfig = new Configuration(); + Map map = (Map) conf.get(this.configKey); + if (map != null) { + for (String key : map.keySet()) { + this.hdfsConfig.set(key, String.valueOf(map.get(key))); + } + } + + try { + HdfsSecurityUtil.login(conf, hdfsConfig); + doPrepare(conf, topologyContext, collector); + } catch (Exception e) { + throw new RuntimeException("Error preparing HdfsBolt: " + e.getMessage(), e); + } + + if (this.rotationPolicy instanceof TimedRotationPolicy) { + startTimedRotationPolicy(); + } + } + + @Override + public final void execute(Tuple tuple) { + + synchronized (this.writeLock) { + boolean forceSync = false; + Writer writer = null; + String writerKey = null; + + if (TupleUtils.isTick(tuple)) { + LOG.debug("TICK! forcing a file system flush"); + this.collector.ack(tuple); + forceSync = true; + } else { + + writerKey = getHashKeyForTuple(tuple); + + try { + writer = getOrCreateWriter(writerKey, tuple); + this.offset = writer.write(tuple); + tupleBatch.add(tuple); + } catch (IOException e) { + //If the write failed, try to sync anything already written + LOG.info("Tuple failed to write, forcing a flush of existing data."); + this.collector.reportError(e); + forceSync = true; + this.collector.fail(tuple); + } + } + + if (this.syncPolicy.mark(tuple, this.offset) || (forceSync && tupleBatch.size() > 0)) { + int attempts = 0; + boolean success = false; + IOException lastException = null; + // Make every attempt to sync the data we have. If it can't be done then kill the bolt with + // a runtime exception. The filesystem is presumably in a very bad state. + while (success == false && attempts < fileRetryCount) { + attempts += 1; + try { + syncAllWriters(); + LOG.debug("Data synced to filesystem. 
Ack'ing [{}] tuples", tupleBatch.size());
+                        for (Tuple t : tupleBatch) {
+                            this.collector.ack(t);
+                        }
+                        tupleBatch.clear();
+                        syncPolicy.reset();
+                        success = true;
+                    } catch (IOException e) {
+                        LOG.warn("Data could not be synced to filesystem on attempt [{}]", attempts);
+                        this.collector.reportError(e);
+                        lastException = e;
+                    }
+                }
+
+                // If unsuccessful, fail the pending tuples
+                if (success == false) {
+                    LOG.warn("Data could not be synced to filesystem, failing this batch of tuples");
+                    for (Tuple t : tupleBatch) {
+                        this.collector.fail(t);
+                    }
+                    tupleBatch.clear();
+
+                    throw new RuntimeException("Sync failed [" + attempts + "] times.", lastException);
+                }
+            }
+
+            if (writer != null && writer.needsRotation()) {
+                doRotationAndRemoveWriter(writerKey, writer);
+            }
+        }
+    }
+
+    private Writer getOrCreateWriter(String writerKey, Tuple tuple) throws IOException {
+        Writer writer;
+
+        writer = writers.get(writerKey);
+        if (writer == null) {
+            Path pathForNextFile = getBasePathForNextFile(tuple);
+            writer = makeNewWriter(pathForNextFile, tuple);
+            writers.put(writerKey, writer);
+        }
+        return writer;
+    }
+
+    /**
+     * A tuple must be mapped to a writer based on two factors.
+     * - bolt specific logic that must separate tuples into different files in the same directory (see the avro bolt
+     * for an example of this)
+     * - the directory the tuple will be partitioned into
+     */
+    private String getHashKeyForTuple(Tuple tuple) {
+        final String boltKey = getWriterKey(tuple);
+        final String partitionDir = this.partitioner.getPartitionPath(tuple);
+        return boltKey + "****" + partitionDir;
+    }
+
+    void doRotationAndRemoveWriter(String writerKey, Writer writer) {
+        try {
+            rotateOutputFile(writer);
+        } catch (IOException e) {
+            this.collector.reportError(e);
+            LOG.error("File could not be rotated");
+            //At this point there is nothing to do. In all likelihood any filesystem operations will fail.
+            //The next tuple will almost certainly fail to write and/or sync, which will force a rotation. That
+            //will give rotateAndReset() a chance to work, which includes creating a fresh file handle.
+        } finally {
+            //rotateOutputFile(writer) has closed the writer. It's safe to remove the writer from the map here.
+            writers.remove(writerKey);
+        }
+    }
+
+    @Override
+    public Map getComponentConfiguration() {
+        return TupleUtils.putTickFrequencyIntoComponentConfig(super.getComponentConfiguration(), tickTupleInterval);
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
+    }
+
+    @Override
+    public void cleanup() {
+        doRotationAndRemoveAllWriters();
+        if (this.rotationTimer != null) {
+            this.rotationTimer.cancel();
+        }
+    }
+
+    private void doRotationAndRemoveAllWriters() {
+        synchronized (writeLock) {
+            for (final Writer writer : writers.values()) {
+                try {
+                    rotateOutputFile(writer);
+                } catch (IOException e) {
+                    this.collector.reportError(e);
+                    LOG.warn("IOException during scheduled file rotation.", e);
+                }
+            }
+            //The above for-loop has closed all the writers. It's safe to clear the map here.
+ writers.clear(); + } + } + + private void syncAllWriters() throws IOException { + for (Writer writer : writers.values()) { + writer.sync(); + } + } + + private void startTimedRotationPolicy() { + long interval = ((TimedRotationPolicy) this.rotationPolicy).getInterval(); + this.rotationTimer = new Timer(true); + TimerTask task = new TimerTask() { + @Override + public void run() { + doRotationAndRemoveAllWriters(); + } + }; + this.rotationTimer.scheduleAtFixedRate(task, interval, interval); + } + + protected Path getBasePathForNextFile(Tuple tuple) { + + final String partitionPath = this.partitioner.getPartitionPath(tuple); + final int rotation; + if (rotationCounterMap.containsKey(partitionPath)) { + rotation = rotationCounterMap.get(partitionPath) + 1; + } else { + rotation = 0; + } + rotationCounterMap.put(partitionPath, rotation); + + return new Path(this.fsUrl + this.fileNameFormat.getPath() + partitionPath, + this.fileNameFormat.getName(rotation, System.currentTimeMillis())); + } + + protected abstract void doPrepare(Map conf, TopologyContext topologyContext, OutputCollector collector) throws + IOException; + + protected abstract String getWriterKey(Tuple tuple); + + protected abstract Writer makeNewWriter(Path path, Tuple tuple) throws IOException; + + static class WritersMap extends LinkedHashMap { + final long maxWriters; + final OutputCollector collector; + + WritersMap(long maxWriters, OutputCollector collector) { + super((int) maxWriters, 0.75f, true); + this.maxWriters = maxWriters; + this.collector = collector; + } + + @Override + protected boolean removeEldestEntry(Map.Entry eldest) { + if (this.size() > this.maxWriters) { + //The writer must be closed before removed from the map. + //If it failed, we might lose some data. + try { + eldest.getValue().close(); + } catch (IOException e) { + collector.reportError(e); + LOG.error("Failed to close the eldest Writer"); + } + return true; + } else { + return false; + } + } + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AvroGenericRecordBolt.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AvroGenericRecordBolt.java new file mode 100644 index 00000000000..f00df3a744c --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AvroGenericRecordBolt.java @@ -0,0 +1,106 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +package org.apache.storm.hdfs.bolt; + +import java.io.IOException; +import java.net.URI; +import java.util.Map; +import org.apache.avro.Schema; +import org.apache.avro.generic.GenericRecord; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.storm.hdfs.bolt.format.FileNameFormat; +import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; +import org.apache.storm.hdfs.bolt.sync.SyncPolicy; +import org.apache.storm.hdfs.common.AbstractHDFSWriter; +import org.apache.storm.hdfs.common.AvroGenericRecordHDFSWriter; +import org.apache.storm.hdfs.common.Partitioner; +import org.apache.storm.hdfs.common.rotation.RotationAction; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.tuple.Tuple; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class AvroGenericRecordBolt extends AbstractHdfsBolt { + + private static final Logger LOG = LoggerFactory.getLogger(AvroGenericRecordBolt.class); + + public AvroGenericRecordBolt withFsUrl(String fsUrl) { + this.fsUrl = fsUrl; + return this; + } + + public AvroGenericRecordBolt withConfigKey(String configKey) { + this.configKey = configKey; + return this; + } + + public AvroGenericRecordBolt withFileNameFormat(FileNameFormat fileNameFormat) { + this.fileNameFormat = fileNameFormat; + return this; + } + + public AvroGenericRecordBolt withSyncPolicy(SyncPolicy syncPolicy) { + this.syncPolicy = syncPolicy; + return this; + } + + public AvroGenericRecordBolt withRotationPolicy(FileRotationPolicy rotationPolicy) { + this.rotationPolicy = rotationPolicy; + return this; + } + + public AvroGenericRecordBolt addRotationAction(RotationAction action) { + this.rotationActions.add(action); + return this; + } + + public AvroGenericRecordBolt withTickTupleIntervalSeconds(int interval) { + this.tickTupleInterval = interval; + return this; + } + + public AvroGenericRecordBolt withMaxOpenFiles(int maxOpenFiles) { + this.maxOpenFiles = maxOpenFiles; + return this; + } + + public AvroGenericRecordBolt withPartitioner(Partitioner partitioner) { + this.partitioner = partitioner; + return this; + } + + @Override + protected void doPrepare(Map conf, TopologyContext topologyContext, OutputCollector collector) throws IOException { + LOG.info("Preparing AvroGenericRecord Bolt..."); + this.fs = FileSystem.get(URI.create(this.fsUrl), hdfsConfig); + } + + /** + * AvroGenericRecordBolt must override this method because messages with different schemas cannot be written to the + * same file. By treating the complete schema as the "key" AbstractHdfsBolt will associate a different writer for + * every distinct schema. 
+ */ + @Override + protected String getWriterKey(Tuple tuple) { + Schema recordSchema = ((GenericRecord) tuple.getValue(0)).getSchema(); + return recordSchema.toString(); + } + + @Override + protected AbstractHDFSWriter makeNewWriter(Path path, Tuple tuple) throws IOException { + Schema recordSchema = ((GenericRecord) tuple.getValue(0)).getSchema(); + return new AvroGenericRecordHDFSWriter(this.rotationPolicy, path, this.fs.create(path), recordSchema); + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/HdfsBolt.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/HdfsBolt.java new file mode 100644 index 00000000000..6677c7b9f74 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/HdfsBolt.java @@ -0,0 +1,112 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.bolt; + +import java.io.IOException; +import java.net.URI; +import java.util.Map; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.storm.hdfs.bolt.format.FileNameFormat; +import org.apache.storm.hdfs.bolt.format.RecordFormat; +import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; +import org.apache.storm.hdfs.bolt.sync.SyncPolicy; +import org.apache.storm.hdfs.common.AbstractHDFSWriter; +import org.apache.storm.hdfs.common.HDFSWriter; +import org.apache.storm.hdfs.common.Partitioner; +import org.apache.storm.hdfs.common.rotation.RotationAction; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.tuple.Tuple; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class HdfsBolt extends AbstractHdfsBolt { + private static final Logger LOG = LoggerFactory.getLogger(HdfsBolt.class); + + private transient FSDataOutputStream out; + private RecordFormat format; + + public HdfsBolt withFsUrl(String fsUrl) { + this.fsUrl = fsUrl; + return this; + } + + public HdfsBolt withConfigKey(String configKey) { + this.configKey = configKey; + return this; + } + + public HdfsBolt withFileNameFormat(FileNameFormat fileNameFormat) { + this.fileNameFormat = fileNameFormat; + return this; + } + + public HdfsBolt withRecordFormat(RecordFormat format) { + this.format = format; + return this; + } + + public HdfsBolt withSyncPolicy(SyncPolicy syncPolicy) { + this.syncPolicy = syncPolicy; + return this; + } + + public HdfsBolt withRotationPolicy(FileRotationPolicy rotationPolicy) { + this.rotationPolicy = rotationPolicy; + return this; + } + + public HdfsBolt addRotationAction(RotationAction action) { + this.rotationActions.add(action); + return this; + } + + public HdfsBolt withTickTupleIntervalSeconds(int interval) { + this.tickTupleInterval = 
interval; + return this; + } + + public HdfsBolt withRetryCount(int fileRetryCount) { + this.fileRetryCount = fileRetryCount; + return this; + } + + public HdfsBolt withPartitioner(Partitioner partitioner) { + this.partitioner = partitioner; + return this; + } + + public HdfsBolt withMaxOpenFiles(int maxOpenFiles) { + this.maxOpenFiles = maxOpenFiles; + return this; + } + + @Override + public void doPrepare(Map conf, TopologyContext topologyContext, OutputCollector collector) throws IOException { + LOG.info("Preparing HDFS Bolt..."); + this.fs = FileSystem.get(URI.create(this.fsUrl), hdfsConfig); + } + + @Override + protected String getWriterKey(Tuple tuple) { + return "CONSTANT"; + } + + @Override + protected AbstractHDFSWriter makeNewWriter(Path path, Tuple tuple) throws IOException { + this.out = this.fs.create(path); + return new HDFSWriter(rotationPolicy, path, out, format); + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/SequenceFileBolt.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/SequenceFileBolt.java new file mode 100644 index 00000000000..991a23cb905 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/SequenceFileBolt.java @@ -0,0 +1,142 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +package org.apache.storm.hdfs.bolt; + +import java.io.IOException; +import java.net.URI; +import java.util.Map; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.compress.CompressionCodecFactory; +import org.apache.storm.hdfs.bolt.format.FileNameFormat; +import org.apache.storm.hdfs.bolt.format.SequenceFormat; +import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; +import org.apache.storm.hdfs.bolt.sync.SyncPolicy; +import org.apache.storm.hdfs.common.AbstractHDFSWriter; +import org.apache.storm.hdfs.common.Partitioner; +import org.apache.storm.hdfs.common.SequenceFileWriter; +import org.apache.storm.hdfs.common.rotation.RotationAction; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.tuple.Tuple; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class SequenceFileBolt extends AbstractHdfsBolt { + private static final Logger LOG = LoggerFactory.getLogger(SequenceFileBolt.class); + + private SequenceFormat format; + private SequenceFile.CompressionType compressionType = SequenceFile.CompressionType.RECORD; + private transient SequenceFile.Writer writer; + + private String compressionCodec = "default"; + private transient CompressionCodecFactory codecFactory; + + public SequenceFileBolt() { + } + + public SequenceFileBolt withCompressionCodec(String codec) { + this.compressionCodec = codec; + return this; + } + + public SequenceFileBolt withFsUrl(String fsUrl) { + this.fsUrl = fsUrl; + return this; + } + + public SequenceFileBolt withConfigKey(String configKey) { + this.configKey = configKey; + return this; + } + + public SequenceFileBolt withFileNameFormat(FileNameFormat fileNameFormat) { + this.fileNameFormat = fileNameFormat; + return this; + } + + public SequenceFileBolt withSequenceFormat(SequenceFormat format) { + this.format = format; + return this; + } + + public SequenceFileBolt withSyncPolicy(SyncPolicy syncPolicy) { + this.syncPolicy = syncPolicy; + return this; + } + + public SequenceFileBolt withRotationPolicy(FileRotationPolicy rotationPolicy) { + this.rotationPolicy = rotationPolicy; + return this; + } + + public SequenceFileBolt withCompressionType(SequenceFile.CompressionType compressionType) { + this.compressionType = compressionType; + return this; + } + + public SequenceFileBolt withTickTupleIntervalSeconds(int interval) { + this.tickTupleInterval = interval; + return this; + } + + public SequenceFileBolt addRotationAction(RotationAction action) { + this.rotationActions.add(action); + return this; + } + + public SequenceFileBolt withRetryCount(int fileRetryCount) { + this.fileRetryCount = fileRetryCount; + return this; + } + + public SequenceFileBolt withPartitioner(Partitioner partitioner) { + this.partitioner = partitioner; + return this; + } + + public SequenceFileBolt withMaxOpenFiles(int maxOpenFiles) { + this.maxOpenFiles = maxOpenFiles; + return this; + } + + @Override + public void doPrepare(Map conf, TopologyContext topologyContext, OutputCollector collector) throws IOException { + LOG.info("Preparing Sequence File Bolt..."); + if (this.format == null) { + throw new IllegalStateException("SequenceFormat must be specified."); + } + + this.fs = FileSystem.get(URI.create(this.fsUrl), hdfsConfig); + this.codecFactory = new CompressionCodecFactory(hdfsConfig); + } + + @Override + protected String getWriterKey(Tuple tuple) { + return "CONSTANT"; + } + + @Override + 
protected AbstractHDFSWriter makeNewWriter(Path path, Tuple tuple) throws IOException { + SequenceFile.Writer writer = SequenceFile.createWriter( + this.hdfsConfig, + SequenceFile.Writer.file(path), + SequenceFile.Writer.keyClass(this.format.keyClass()), + SequenceFile.Writer.valueClass(this.format.valueClass()), + SequenceFile.Writer.compression(this.compressionType, this.codecFactory.getCodecByName(this.compressionCodec)) + ); + + return new SequenceFileWriter(this.rotationPolicy, path, writer, this.format); + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/Writer.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/Writer.java new file mode 100644 index 00000000000..c6b3fb3d249 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/Writer.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.bolt; + +import java.io.IOException; +import org.apache.hadoop.fs.Path; +import org.apache.storm.tuple.Tuple; + +public interface Writer { + long write(Tuple tuple) throws IOException; + + void sync() throws IOException; + + void close() throws IOException; + + boolean needsRotation(); + + Path getFilePath(); +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DefaultFileNameFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DefaultFileNameFormat.java new file mode 100644 index 00000000000..157929c3579 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DefaultFileNameFormat.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.bolt.format; + +import java.util.Map; +import org.apache.storm.task.TopologyContext; + +/** + * Creates file names with the following format: + *
+ *     {prefix}{componentId}-{taskId}-{rotationNum}-{timestamp}{extension}
+ *
+ * For example:
+ *
+ *     MyBolt-5-7-1390579837830.txt
+ *
+ * By default, prefix is empty and extension is ".txt".
+ */
+public class DefaultFileNameFormat implements FileNameFormat {
+    private String componentId;
+    private int taskId;
+    private String path = "/storm";
+    private String prefix = "";
+    private String extension = ".txt";
+
+    /**
+     * Overrides the default prefix.
+     */
+    public DefaultFileNameFormat withPrefix(String prefix) {
+        this.prefix = prefix;
+        return this;
+    }
+
+    /**
+     * Overrides the default file extension.
+     */
+    public DefaultFileNameFormat withExtension(String extension) {
+        this.extension = extension;
+        return this;
+    }
+
+    public DefaultFileNameFormat withPath(String path) {
+        this.path = path;
+        return this;
+    }
+
+    @Override
+    public void prepare(Map conf, TopologyContext topologyContext) {
+        this.componentId = topologyContext.getThisComponentId();
+        this.taskId = topologyContext.getThisTaskId();
+    }
+
+    @Override
+    public String getName(long rotation, long timeStamp) {
+        return this.prefix + this.componentId + "-" + this.taskId + "-" + rotation + "-" + timeStamp + this.extension;
+    }
+
+    @Override
+    public String getPath() {
+        return this.path;
+    }
+} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DefaultSequenceFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DefaultSequenceFormat.java new file mode 100644 index 00000000000..8db7ffc3834 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DefaultSequenceFormat.java @@ -0,0 +1,69 @@ +/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.hdfs.bolt.format;
+
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.storm.tuple.Tuple;
+
+/**
+ * Basic SequenceFormat implementation that uses
+ * LongWritable for keys and Text for values.
+ */ +public class DefaultSequenceFormat implements SequenceFormat { + private transient LongWritable key; + private transient Text value; + + private String keyField; + private String valueField; + + public DefaultSequenceFormat(String keyField, String valueField) { + this.keyField = keyField; + this.valueField = valueField; + } + + @Override + public Class keyClass() { + return LongWritable.class; + } + + @Override + public Class valueClass() { + return Text.class; + } + + @Override + public Writable key(Tuple tuple) { + if (this.key == null) { + this.key = new LongWritable(); + } + this.key.set(tuple.getLongByField(this.keyField)); + return this.key; + } + + @Override + public Writable value(Tuple tuple) { + if (this.value == null) { + this.value = new Text(); + } + this.value.set(tuple.getStringByField(this.valueField)); + return this.value; + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DelimitedRecordFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DelimitedRecordFormat.java new file mode 100644 index 00000000000..f8cdad92923 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DelimitedRecordFormat.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.bolt.format; + +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; + +/** + * RecordFormat implementation that uses field and record delimiters. + * By default uses a comma (",") as the field delimiter and a + * newline ("\n") as the record delimiter. + * + *
Also by default, this implementation will output all the + * field values in the tuple in the order they were declared. To + * override this behavior, call withFields() to + * specify which tuple fields to output. + * + */ +public class DelimitedRecordFormat implements RecordFormat { + public static final String DEFAULT_FIELD_DELIMITER = ","; + public static final String DEFAULT_RECORD_DELIMITER = "\n"; + private String fieldDelimiter = DEFAULT_FIELD_DELIMITER; + private String recordDelimiter = DEFAULT_RECORD_DELIMITER; + private Fields fields = null; + + /** + * Only output the specified fields. + */ + public DelimitedRecordFormat withFields(Fields fields) { + this.fields = fields; + return this; + } + + /** + * Overrides the default field delimiter. + */ + public DelimitedRecordFormat withFieldDelimiter(String delimiter) { + this.fieldDelimiter = delimiter; + return this; + } + + /** + * Overrides the default record delimiter. + */ + public DelimitedRecordFormat withRecordDelimiter(String delimiter) { + this.recordDelimiter = delimiter; + return this; + } + + @Override + public byte[] format(Tuple tuple) { + StringBuilder sb = new StringBuilder(); + Fields fields = this.fields == null ? tuple.getFields() : this.fields; + int size = fields.size(); + for (int i = 0; i < size; i++) { + sb.append(tuple.getValueByField(fields.get(i))); + if (i != size - 1) { + sb.append(this.fieldDelimiter); + } + } + sb.append(this.recordDelimiter); + return sb.toString().getBytes(); + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/FileNameFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/FileNameFormat.java new file mode 100644 index 00000000000..70210a934b0 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/FileNameFormat.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.bolt.format; + +import java.io.Serializable; +import java.util.Map; +import org.apache.storm.task.TopologyContext; + +/** + * Formatter interface for determining HDFS file names. + * + */ +public interface FileNameFormat extends Serializable { + + void prepare(Map conf, TopologyContext topologyContext); + + /** + * Returns the filename the HdfsBolt will create. 
+ * @param rotation the current file rotation number (incremented on every rotation) + * @param timeStamp current time in milliseconds when the rotation occurs + */ + String getName(long rotation, long timeStamp); + + String getPath(); +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/RecordFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/RecordFormat.java new file mode 100644 index 00000000000..5102f381863 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/RecordFormat.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.bolt.format; + +import java.io.Serializable; +import org.apache.storm.tuple.Tuple; + +/** + * Formats a Tuple object into a byte array that will be written to HDFS. + */ +public interface RecordFormat extends Serializable { + byte[] format(Tuple tuple); +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/SequenceFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/SequenceFormat.java new file mode 100644 index 00000000000..7ea05a9c147 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/SequenceFormat.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.hdfs.bolt.format; + +import java.io.Serializable; +import org.apache.storm.tuple.Tuple; + +/** + * Interface for converting Tuple objects to HDFS sequence file key-value pairs. + */ +public interface SequenceFormat extends Serializable { + /** + * Key class used by implementation (e.g. IntWritable.class, etc.). + */ + Class keyClass(); + + /** + * Value class used by implementation (e.g. Text.class, etc.). + */ + Class valueClass(); + + /** + * Given a tuple, return the key that should be written to the sequence file. + */ + Object key(Tuple tuple); + + /** + * Given a tuple, return the value that should be written to the sequence file. 
+ */ + Object value(Tuple tuple); +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/SimpleFileNameFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/SimpleFileNameFormat.java new file mode 100644 index 00000000000..d80aaa8cd9f --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/SimpleFileNameFormat.java @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.bolt.format; + +import java.net.UnknownHostException; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.Map; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.utils.Utils; + +public class SimpleFileNameFormat implements FileNameFormat { + + private static final long serialVersionUID = 1L; + + private String componentId; + private int taskId; + private String host; + private String path = "/storm"; + private String name = "$TIME.$NUM.txt"; + private String timeFormat = "yyyyMMddHHmmss"; + + @Override + public String getName(long rotation, long timeStamp) { + // compile parameters + SimpleDateFormat dateFormat = new SimpleDateFormat(timeFormat); + String ret = name + .replace("$TIME", dateFormat.format(new Date(timeStamp))) + .replace("$NUM", String.valueOf(rotation)) + .replace("$HOST", host) + .replace("$COMPONENT", componentId) + .replace("$TASK", String.valueOf(taskId)); + return ret; + } + + @Override + public String getPath() { + return path; + } + + @SuppressWarnings("unchecked") + @Override + public void prepare(Map conf, TopologyContext topologyContext) { + this.componentId = topologyContext.getThisComponentId(); + this.taskId = topologyContext.getThisTaskId(); + try { + this.host = Utils.localHostname(); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + } + + public SimpleFileNameFormat withPath(String path) { + this.path = path; + return this; + } + + /** + * support parameters:
+ * $TIME - current time; use withTimeFormat to format it<br/>
+ * $NUM - rotation number<br/>
+ * $HOST - local host name<br/>
+ * $COMPONENT - component id<br/>
+ * $TASK - task id<br/>
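+ * <p>For example (illustrative component and task values): with the default
+ * time format, {@code withName("$COMPONENT.$TASK.$TIME.txt")} produces names
+ * like {@code wordCounter.3.20240101120000.txt}.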
+ * + * @param name file name + */ + public SimpleFileNameFormat withName(String name) { + this.name = name; + return this; + } + + public SimpleFileNameFormat withTimeFormat(String timeFormat) { + //check format + try { + new SimpleDateFormat(timeFormat); + } catch (Exception e) { + throw new IllegalArgumentException("invalid timeFormat: " + e.getMessage()); + } + this.timeFormat = timeFormat; + return this; + } + +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileRotationPolicy.java new file mode 100644 index 00000000000..13229dd125d --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileRotationPolicy.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.bolt.rotation; + +import java.io.Serializable; +import org.apache.storm.tuple.Tuple; + +/** + * Used by the HdfsBolt to decide when to rotate files. + * + *
<p>The HdfsBolt will call the mark() method for every + * tuple received. If the mark() method returns + * true the HdfsBolt will perform a file rotation. + * + *
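+ * <p>A minimal sketch of a custom policy (hypothetical; this module ships
+ * size- and time-based policies, not this one):
+ * <pre>
+ *     public class CountRotationPolicy implements FileRotationPolicy {
+ *         private final long max;
+ *         private long seen = 0;
+ *
+ *         public CountRotationPolicy(long max) { this.max = max; }
+ *
+ *         public boolean mark(Tuple tuple, long offset) { return ++seen >= max; }
+ *
+ *         public void reset() { seen = 0; }
+ *
+ *         public FileRotationPolicy copy() { return new CountRotationPolicy(max); }
+ *     }
+ * </pre>
+ *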
After file rotation, the HdfsBolt will call the reset() + * method. + */ +public interface FileRotationPolicy extends Serializable { + /** + * Called for every tuple the HdfsBolt executes. + * + * @param tuple The tuple executed. + * @param offset current offset of file being written + * @return true if a file rotation should be performed + */ + boolean mark(Tuple tuple, long offset); + + + /** + * Called after the HdfsBolt rotates a file. + */ + void reset(); + + /** + * Must be able to copy the rotation policy. + */ + FileRotationPolicy copy(); +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileSizeRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileSizeRotationPolicy.java new file mode 100644 index 00000000000..eca10d67101 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileSizeRotationPolicy.java @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.bolt.rotation; + +import org.apache.storm.tuple.Tuple; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * File rotation policy that will rotate files when a certain + * file size is reached. + * + *
<p>For example: + * <pre>
+ *     // rotate when files reach 5MB
+ *     FileSizeRotationPolicy policy =
+ *          new FileSizeRotationPolicy(5.0f, Units.MB);
+ * </pre>
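+ *
+ * <p>Note that {@code Units} values are binary powers: {@code Units.MB} is
+ * 2^20 (1,048,576) bytes, so the policy above rotates after roughly 5.24 million bytes.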
+ * + */ +public class FileSizeRotationPolicy implements FileRotationPolicy { + private static final Logger LOG = LoggerFactory.getLogger(FileSizeRotationPolicy.class); + private long maxBytes; + private long lastOffset = 0; + private long currentBytesWritten = 0; + + public FileSizeRotationPolicy(float count, Units units) { + this.maxBytes = (long) (count * units.getByteCount()); + } + + protected FileSizeRotationPolicy(long maxBytes) { + this.maxBytes = maxBytes; + } + + @Override + public boolean mark(Tuple tuple, long offset) { + long diff = offset - this.lastOffset; + this.currentBytesWritten += diff; + this.lastOffset = offset; + return this.currentBytesWritten >= this.maxBytes; + } + + @Override + public void reset() { + this.currentBytesWritten = 0; + this.lastOffset = 0; + } + + @Override + public FileRotationPolicy copy() { + return new FileSizeRotationPolicy(this.maxBytes); + } + + public enum Units { + + KB((long) Math.pow(2, 10)), + MB((long) Math.pow(2, 20)), + GB((long) Math.pow(2, 30)), + TB((long) Math.pow(2, 40)); + + private long byteCount; + + Units(long byteCount) { + this.byteCount = byteCount; + } + + public long getByteCount() { + return byteCount; + } + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/NoRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/NoRotationPolicy.java new file mode 100644 index 00000000000..f25be14da9f --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/NoRotationPolicy.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.bolt.rotation; + +import org.apache.storm.tuple.Tuple; + +/** + * File rotation policy that will never rotate... + * Just one big file. Intended for testing purposes. + */ +public class NoRotationPolicy implements FileRotationPolicy { + @Override + public boolean mark(Tuple tuple, long offset) { + return false; + } + + @Override + public void reset() { + } + + @Override + public FileRotationPolicy copy() { + return this; + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/TimedRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/TimedRotationPolicy.java new file mode 100644 index 00000000000..c2f7e16797d --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/TimedRotationPolicy.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.hdfs.bolt.rotation; + +import org.apache.storm.tuple.Tuple; + +public class TimedRotationPolicy implements FileRotationPolicy { + + private long interval; + + public TimedRotationPolicy(float count, TimeUnit units) { + this.interval = (long) (count * units.getMilliSeconds()); + } + + protected TimedRotationPolicy(long interval) { + this.interval = interval; + } + + /** + * Called for every tuple the HdfsBolt executes. + * + * @param tuple The tuple executed. + * @param offset current offset of file being written + * @return true if a file rotation should be performed + */ + @Override + public boolean mark(Tuple tuple, long offset) { + return false; + } + + /** + * Called after the HdfsBolt rotates a file. + */ + @Override + public void reset() { + + } + + @Override + public FileRotationPolicy copy() { + return new TimedRotationPolicy(this.interval); + } + + public long getInterval() { + return this.interval; + } + + public enum TimeUnit { + + SECONDS((long) 1000), + MINUTES((long) 1000 * 60), + HOURS((long) 1000 * 60 * 60), + DAYS((long) 1000 * 60 * 60 * 24); + + private long milliSeconds; + + TimeUnit(long milliSeconds) { + this.milliSeconds = milliSeconds; + } + + public long getMilliSeconds() { + return milliSeconds; + } + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/sync/CountSyncPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/sync/CountSyncPolicy.java new file mode 100644 index 00000000000..a048dc3002e --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/sync/CountSyncPolicy.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.bolt.sync; + +import org.apache.storm.tuple.Tuple; + +/** + * SyncPolicy implementation that will trigger a + * file system sync after a certain number of tuples + * have been processed. 
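+ *
+ * <p>For example (illustrative): {@code new CountSyncPolicy(1000)} triggers an
+ * HDFS sync after every 1,000 tuples.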
+ */ +public class CountSyncPolicy implements SyncPolicy { + private int count; + private int executeCount = 0; + + public CountSyncPolicy(int count) { + this.count = count; + } + + @Override + public boolean mark(Tuple tuple, long offset) { + this.executeCount++; + return this.executeCount >= this.count; + } + + @Override + public void reset() { + this.executeCount = 0; + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/sync/SyncPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/sync/SyncPolicy.java new file mode 100644 index 00000000000..12c673f3499 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/sync/SyncPolicy.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.bolt.sync; + +import java.io.Serializable; +import org.apache.storm.tuple.Tuple; + +/** + * Interface for controlling when the HdfsBolt + * syncs and flushes the filesystem. + * + */ +public interface SyncPolicy extends Serializable { + /** + * Called for every tuple the HdfsBolt executes. + * + * @param tuple The tuple executed. + * @param offset current offset for the file being written + * @return true if a sync should be performed + */ + boolean mark(Tuple tuple, long offset); + + + /** + * Called after the HdfsBolt performs a sync. + * + */ + void reset(); + +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/AbstractHDFSWriter.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/AbstractHDFSWriter.java new file mode 100644 index 00000000000..caf6b49f925 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/AbstractHDFSWriter.java @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +package org.apache.storm.hdfs.common; + +import java.io.IOException; +import org.apache.hadoop.fs.Path; +import org.apache.storm.hdfs.bolt.Writer; +import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; +import org.apache.storm.tuple.Tuple; + +@SuppressWarnings("checkstyle:AbbreviationAsWordInName") +public abstract class AbstractHDFSWriter implements Writer { + protected final Path filePath; + protected final FileRotationPolicy rotationPolicy; + protected long lastUsedTime; + protected long offset; + protected boolean needsRotation; + + public AbstractHDFSWriter(FileRotationPolicy policy, Path path) { + //This must be defensively copied, because a bolt probably has only one rotation policy object + this.rotationPolicy = policy.copy(); + this.filePath = path; + } + + @Override + public final long write(Tuple tuple) throws IOException { + doWrite(tuple); + this.needsRotation = rotationPolicy.mark(tuple, offset); + + return this.offset; + } + + @Override + public final void sync() throws IOException { + doSync(); + } + + @Override + public final void close() throws IOException { + doClose(); + } + + @Override + public boolean needsRotation() { + return needsRotation; + } + + @Override + public Path getFilePath() { + return this.filePath; + } + + protected abstract void doWrite(Tuple tuple) throws IOException; + + protected abstract void doSync() throws IOException; + + protected abstract void doClose() throws IOException; + +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/AvroGenericRecordHDFSWriter.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/AvroGenericRecordHDFSWriter.java new file mode 100644 index 00000000000..713aa587a8b --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/AvroGenericRecordHDFSWriter.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +package org.apache.storm.hdfs.common; + +import java.io.IOException; +import java.util.EnumSet; +import org.apache.avro.Schema; +import org.apache.avro.file.DataFileWriter; +import org.apache.avro.generic.GenericDatumWriter; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.DatumWriter; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; +import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; +import org.apache.storm.tuple.Tuple; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings("checkstyle:AbbreviationAsWordInName") +public class AvroGenericRecordHDFSWriter extends AbstractHDFSWriter { + + private static final Logger LOG = LoggerFactory.getLogger(AvroGenericRecordHDFSWriter.class); + + private FSDataOutputStream out; + private Schema schema; + private DataFileWriter avroWriter; + + public AvroGenericRecordHDFSWriter(FileRotationPolicy policy, Path path, FSDataOutputStream stream, Schema schema) throws IOException { + super(policy, path); + this.out = stream; + this.schema = schema; + DatumWriter datumWriter = new GenericDatumWriter<>(schema); + avroWriter = new DataFileWriter<>(datumWriter); + avroWriter.create(this.schema, this.out); + } + + @Override + protected void doWrite(Tuple tuple) throws IOException { + GenericRecord avroRecord = (GenericRecord) tuple.getValue(0); + avroWriter.append(avroRecord); + offset = this.out.getPos(); + + this.needsRotation = this.rotationPolicy.mark(tuple, offset); + } + + @Override + protected void doSync() throws IOException { + avroWriter.flush(); + + LOG.debug("Attempting to sync all data to filesystem"); + if (this.out instanceof HdfsDataOutputStream) { + ((HdfsDataOutputStream) this.out).hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH)); + } else { + this.out.hsync(); + } + } + + @Override + protected void doClose() throws IOException { + this.avroWriter.close(); + this.out.close(); + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/HDFSWriter.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/HDFSWriter.java new file mode 100644 index 00000000000..8b3dcd1bf2d --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/HDFSWriter.java @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +package org.apache.storm.hdfs.common; + +import java.io.IOException; +import java.util.EnumSet; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; +import org.apache.storm.hdfs.bolt.format.RecordFormat; +import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; +import org.apache.storm.tuple.Tuple; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings("checkstyle:AbbreviationAsWordInName") +public class HDFSWriter extends AbstractHDFSWriter { + + private static final Logger LOG = LoggerFactory.getLogger(HDFSWriter.class); + + private FSDataOutputStream out; + private RecordFormat format; + + public HDFSWriter(FileRotationPolicy policy, Path path, FSDataOutputStream out, RecordFormat format) { + super(policy, path); + this.out = out; + this.format = format; + } + + @Override + protected void doWrite(Tuple tuple) throws IOException { + byte[] bytes = this.format.format(tuple); + out.write(bytes); + this.offset += bytes.length; + } + + @Override + protected void doSync() throws IOException { + LOG.info("Attempting to sync all data to filesystem"); + if (this.out instanceof HdfsDataOutputStream) { + ((HdfsDataOutputStream) this.out).hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH)); + } else { + this.out.hsync(); + } + } + + @Override + protected void doClose() throws IOException { + this.out.close(); + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/HdfsUtils.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/HdfsUtils.java new file mode 100644 index 00000000000..462087eebc4 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/HdfsUtils.java @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.common; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; +import org.apache.hadoop.ipc.RemoteException; + +public class HdfsUtils { + /** list files sorted by modification time that have not been modified since 'olderThan'. if + * 'olderThan' is <= 0 then the filtering is disabled */ + public static ArrayList listFilesByModificationTime(FileSystem fs, Path directory, long olderThan) + throws IOException { + ArrayList fstats = new ArrayList<>(); + + RemoteIterator itr = fs.listFiles(directory, false); + while (itr.hasNext()) { + LocatedFileStatus fileStatus = itr.next(); + if (olderThan > 0) { + if (fileStatus.getModificationTime() <= olderThan) { + fstats.add(fileStatus); + } + } else { + fstats.add(fileStatus); + } + } + Collections.sort(fstats, new ModifTimeComparator()); + + ArrayList result = new ArrayList<>(fstats.size()); + for (LocatedFileStatus fstat : fstats) { + result.add(fstat.getPath()); + } + return result; + } + + /** + * Returns null if file already exists. throws if there was unexpected problem + */ + public static FSDataOutputStream tryCreateFile(FileSystem fs, Path file) throws IOException { + try { + FSDataOutputStream os = fs.create(file, false); + return os; + } catch (FileAlreadyExistsException e) { + return null; + } catch (RemoteException e) { + if (e.unwrapRemoteException() instanceof AlreadyBeingCreatedException) { + return null; + } else { // unexpected error + throw e; + } + } + } + + public static class Pair { + private K key; + private V value; + + public Pair(K key, V value) { + this.key = key; + this.value = value; + } + + public static Pair of(K key, V value) { + return new Pair(key, value); + } + + public K getKey() { + return key; + } + + public V getValue() { + return value; + } + } // class Pair +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/ModifTimeComparator.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/ModifTimeComparator.java new file mode 100644 index 00000000000..47ebdfe304e --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/ModifTimeComparator.java @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.common; + +import java.util.Comparator; +import org.apache.hadoop.fs.FileStatus; + + +public class ModifTimeComparator + implements Comparator<FileStatus> { + @Override + public int compare(FileStatus o1, FileStatus o2) { + return Long.compare(o1.getModificationTime(), o2.getModificationTime()); + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/NullPartitioner.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/NullPartitioner.java new file mode 100644 index 00000000000..3137f48b099 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/NullPartitioner.java @@ -0,0 +1,26 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.common; + +import org.apache.storm.tuple.Tuple; + +/** + * The NullPartitioner partitions every tuple to the empty string. In other words, no partition subdirectories will + * be added to the path. + */ +public class NullPartitioner implements Partitioner { + @Override + public String getPartitionPath(final Tuple tuple) { + return ""; + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/Partitioner.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/Partitioner.java new file mode 100644 index 00000000000..92f674733b1 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/Partitioner.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.common; + +import java.io.Serializable; +import org.apache.storm.tuple.Tuple; + +public interface Partitioner extends Serializable { + + /** + * Return a relative path that the tuple should be written to.
For example, if an HdfsBolt were configured to write + * to /common/output and a partitioner returned "/foo" then the bolt should open a file in "/common/output/foo" + * + *
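+ * <p>A sketch of a field-based partitioner (hypothetical; the field name is assumed):
+ * <pre>
+ *     public class DatePartitioner implements Partitioner {
+ *         public String getPartitionPath(Tuple tuple) {
+ *             return Path.SEPARATOR + tuple.getStringByField("date");
+ *         }
+ *     }
+ * </pre>
+ *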
A best practice is to use Path.SEPARATOR instead of a literal "/" + * + * @param tuple The tuple for which the relative path is being calculated. + */ + String getPartitionPath(Tuple tuple); +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/SequenceFileWriter.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/SequenceFileWriter.java new file mode 100644 index 00000000000..d0507b84ee5 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/SequenceFileWriter.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.common; + +import java.io.IOException; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.SequenceFile; +import org.apache.storm.hdfs.bolt.format.SequenceFormat; +import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; +import org.apache.storm.tuple.Tuple; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class SequenceFileWriter extends AbstractHDFSWriter { + + private static final Logger LOG = LoggerFactory.getLogger(SequenceFileWriter.class); + + private SequenceFile.Writer writer; + private SequenceFormat format; + + public SequenceFileWriter(FileRotationPolicy policy, Path path, SequenceFile.Writer writer, SequenceFormat format) { + super(policy, path); + this.writer = writer; + this.format = format; + } + + @Override + protected void doWrite(Tuple tuple) throws IOException { + this.writer.append(this.format.key(tuple), this.format.value(tuple)); + this.offset = this.writer.getLength(); + } + + @Override + protected void doSync() throws IOException { + LOG.debug("Attempting to sync all data to filesystem"); + this.writer.hsync(); + } + + @Override + protected void doClose() throws IOException { + this.writer.close(); + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/rotation/MoveFileAction.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/rotation/MoveFileAction.java new file mode 100644 index 00000000000..0e8e1dd38b0 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/rotation/MoveFileAction.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.hdfs.common.rotation; + +import java.io.IOException; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class MoveFileAction implements RotationAction { + private static final Logger LOG = LoggerFactory.getLogger(MoveFileAction.class); + + private String destination; + + public MoveFileAction toDestination(String destDir) { + destination = destDir; + return this; + } + + @Override + public void execute(FileSystem fileSystem, Path filePath) throws IOException { + Path destPath = new Path(destination, filePath.getName()); + LOG.info("Moving file {} to {}", filePath, destPath); + boolean success = fileSystem.rename(filePath, destPath); + if (!success) { + LOG.warn("Rename of {} to {} returned false; the file may not have been moved", filePath, destPath); + } + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/rotation/RotationAction.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/rotation/RotationAction.java new file mode 100644 index 00000000000..bad3d0649a7 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/rotation/RotationAction.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.hdfs.common.rotation; + +import java.io.IOException; +import java.io.Serializable; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +public interface RotationAction extends Serializable { + void execute(FileSystem fileSystem, Path filePath) throws IOException; +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/AbstractFileReader.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/AbstractFileReader.java new file mode 100644 index 00000000000..614d2409510 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/AbstractFileReader.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.spout; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + + +abstract class AbstractFileReader implements FileReader { + + private final Path file; + + AbstractFileReader(FileSystem fs, Path file) { + if (fs == null) { + throw new IllegalArgumentException("filesystem arg cannot be null for reader"); + } + if (file == null) { + throw new IllegalArgumentException("file arg cannot be null for reader"); + } + this.file = file; + } + + @Override + public Path getFilePath() { + return file; + } + + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + AbstractFileReader that = (AbstractFileReader) o; + + return !(file != null ? !file.equals(that.file) : that.file != null); + } + + @Override + public int hashCode() { + return file != null ? file.hashCode() : 0; + } + +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/Configs.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/Configs.java new file mode 100644 index 00000000000..9c54a19b6d8 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/Configs.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.spout; + +import org.apache.storm.validation.ConfigValidation.Validator; +import org.apache.storm.validation.ConfigValidationAnnotations.CustomValidator; +import org.apache.storm.validation.ConfigValidationAnnotations.IsBoolean; +import org.apache.storm.validation.ConfigValidationAnnotations.IsInteger; +import org.apache.storm.validation.ConfigValidationAnnotations.IsMapEntryType; +import org.apache.storm.validation.ConfigValidationAnnotations.IsPositiveNumber; +import org.apache.storm.validation.ConfigValidationAnnotations.IsString; +import org.apache.storm.validation.NotConf; +import org.apache.storm.validation.Validated; + +public class Configs implements Validated { + /** + * Required - chose the file type being consumed. + * @deprecated please use {@link HdfsSpout#setReaderType(String)} + */ + @Deprecated + @IsString + @CustomValidator(validatorClass = ReaderTypeValidator.class) + public static final String READER_TYPE = "hdfsspout.reader.type"; + public static final String TEXT = "text"; + public static final String SEQ = "seq"; + /** + * Required - HDFS name node. + * @deprecated please use {@link HdfsSpout#setHdfsUri(String)} + */ + @Deprecated + @IsString + public static final String HDFS_URI = "hdfsspout.hdfs"; + /** + * Required - dir from which to read files. + * @deprecated please use {@link HdfsSpout#setSourceDir(String)} + */ + @Deprecated + @IsString + public static final String SOURCE_DIR = "hdfsspout.source.dir"; + /** + * Required - completed files will be moved here. + * @deprecated please use {@link HdfsSpout#setArchiveDir(String)} + */ + @Deprecated + @IsString + public static final String ARCHIVE_DIR = "hdfsspout.archive.dir"; + /** + * Required - unparsable files will be moved here. + * @deprecated please use {@link HdfsSpout#setBadFilesDir(String)} + */ + @Deprecated + @IsString + public static final String BAD_DIR = "hdfsspout.badfiles.dir"; + /** + * Directory in which lock files will be created. + * @deprecated please use {@link HdfsSpout#setLockDir(String)} + */ + @Deprecated + @IsString + public static final String LOCK_DIR = "hdfsspout.lock.dir"; + /** + * Commit after N records. 0 disables this. + * @deprecated please use {@link HdfsSpout#setCommitFrequencyCount(int)} + */ + @Deprecated + @IsInteger + @IsPositiveNumber(includeZero = true) + public static final String COMMIT_FREQ_COUNT = "hdfsspout.commit.count"; + /** + * Commit after N secs. cannot be disabled. + * @deprecated please use {@link HdfsSpout#setCommitFrequencySec(int)} + */ + @Deprecated + @IsInteger + @IsPositiveNumber + public static final String COMMIT_FREQ_SEC = "hdfsspout.commit.sec"; + /** + * Max outstanding. + * @deprecated please use {@link HdfsSpout#setMaxOutstanding(int)} + */ + @Deprecated + @IsInteger + @IsPositiveNumber(includeZero = true) + public static final String MAX_OUTSTANDING = "hdfsspout.max.outstanding"; + /** + * Lock timeout. 
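+ * The shipped default appears to be {@link #DEFAULT_LOCK_TIMEOUT} (5 minutes).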
+ * @deprecated please use {@link HdfsSpout#setLockTimeoutSec(int)} + */ + @Deprecated + @IsInteger + @IsPositiveNumber + public static final String LOCK_TIMEOUT = "hdfsspout.lock.timeout.sec"; + /** + * If clocks on machines in the Storm cluster are in sync, this is the inactivity duration after which locks are + * considered candidates for being reassigned to another spout. + * + * @deprecated please use {@link HdfsSpout#setClocksInSync(boolean)} + */ + @Deprecated + @IsBoolean + public static final String CLOCKS_INSYNC = "hdfsspout.clocks.insync"; + /** + * Ignore suffix. + * @deprecated please use {@link HdfsSpout#setIgnoreSuffix(String)} + */ + @Deprecated + @IsString + public static final String IGNORE_SUFFIX = "hdfsspout.ignore.suffix"; + /** + * Filenames with this suffix in archive dir will be ignored by the Spout. + */ + @NotConf + public static final String DEFAULT_LOCK_DIR = ".lock"; + public static final int DEFAULT_COMMIT_FREQ_COUNT = 20000; + public static final int DEFAULT_COMMIT_FREQ_SEC = 10; + public static final int DEFAULT_MAX_OUTSTANDING = 10000; + public static final int DEFAULT_LOCK_TIMEOUT = 5 * 60; // 5 min + @IsMapEntryType(keyType = String.class, valueType = String.class) + public static final String DEFAULT_HDFS_CONFIG_KEY = "hdfs.config"; + + public static class ReaderTypeValidator extends Validator { + @Override + public void validateField(String name, Object o) { + HdfsSpout.checkValidReader((String) o); + } + } +} // class Configs diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/DirLock.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/DirLock.java new file mode 100644 index 00000000000..488531a040d --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/DirLock.java @@ -0,0 +1,130 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.spout; + +import java.io.IOException; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.storm.hdfs.common.HdfsUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Facility to synchronize access to an HDFS directory. The lock itself is represented + * as a file in the same directory. Relies on atomic file creation. + */ +public class DirLock { + public static final String DIR_LOCK_FILE = "DIRLOCK"; + private static final Logger LOG = LoggerFactory.getLogger(DirLock.class); + private final Path lockFile; + private FileSystem fs; + + private DirLock(FileSystem fs, Path lockFile) throws IOException { + if (fs.isDirectory(lockFile)) { + throw new IllegalArgumentException(lockFile.toString() + " must be a file, not a directory"); + } + this.fs = fs; + this.lockFile = lockFile; + } + + /** Get a lock on the dir if it is not already locked. + * + * @param dir the dir on which to get a lock + * @return the lock object if the lock was acquired, or null if the dir is already locked + * @throws IOException if there were errors + */ + public static DirLock tryLock(FileSystem fs, Path dir) throws IOException { + Path lockFile = getDirLockFile(dir); + + try { + FSDataOutputStream ostream = HdfsUtils.tryCreateFile(fs, lockFile); + if (ostream != null) { + LOG.debug("Thread ({}) Acquired lock on dir {}", threadInfo(), dir); + ostream.close(); + return new DirLock(fs, lockFile); + } else { + LOG.debug("Thread ({}) cannot lock dir {} as it is already locked.", threadInfo(), dir); + return null; + } + } catch (IOException e) { + LOG.error("Error when acquiring lock on dir " + dir, e); + throw e; + } + } + + private static Path getDirLockFile(Path dir) { + return new Path(dir.toString() + Path.SEPARATOR_CHAR + DIR_LOCK_FILE); + } + + private static String threadInfo() { + return "ThdId=" + Thread.currentThread().getId() + ", ThdName=" + + Thread.currentThread().getName(); + } + + /** + * If the lock on the directory is stale, take ownership. + */ + public static DirLock takeOwnershipIfStale(FileSystem fs, Path dirToLock, int lockTimeoutSec) { + Path dirLockFile = getDirLockFile(dirToLock); + + long now = System.currentTimeMillis(); + long expiryTime = now - (lockTimeoutSec * 1000L); + + try { + long modTime = fs.getFileStatus(dirLockFile).getModificationTime(); + if (modTime <= expiryTime) { + return takeOwnership(fs, dirLockFile); + } + return null; + } catch (IOException e) { + return null; + } + } + + private static DirLock takeOwnership(FileSystem fs, Path dirLockFile) throws IOException { + if (fs instanceof DistributedFileSystem) { + if (!((DistributedFileSystem) fs).recoverLease(dirLockFile)) { + LOG.warn("Unable to recover lease on dir lock file " + dirLockFile + + " right now. Cannot transfer ownership. 
Will need to try later."); + return null; + } + } + + // delete and recreate lock file + if (fs.delete(dirLockFile, false)) { // returns false if somebody else already deleted it (to take ownership) + FSDataOutputStream ostream = HdfsUtils.tryCreateFile(fs, dirLockFile); + if (ostream != null) { + ostream.close(); + } + return new DirLock(fs, dirLockFile); + } + return null; + } + + /** + * Release lock on dir by deleting the lock file. + */ + public void release() throws IOException { + if (!fs.delete(lockFile, false)) { + LOG.error("Thread {} could not delete dir lock {} ", threadInfo(), lockFile); + } else { + LOG.debug("Thread {} Released dir lock {} ", threadInfo(), lockFile); + } + } + + public Path getLockFile() { + return lockFile; + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/FileLock.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/FileLock.java new file mode 100644 index 00000000000..7ff5aaaa29c --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/FileLock.java @@ -0,0 +1,328 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.spout; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.Collection; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.storm.hdfs.common.HdfsUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Facility to synchronize access to HDFS files. Thread gains exclusive access to a file by acquiring + * a FileLock object. The lock itself is represented as file on HDFS. Relies on atomic file creation. + * Owning thread must heartbeat periodically on the lock to prevent the lock from being deemed as + * stale (i.e. lock whose owning thread have died). + */ +public class FileLock { + + private static final Logger LOG = LoggerFactory.getLogger(FileLock.class); + private final FileSystem fs; + private final String componentId; + private final Path lockFile; + private final FSDataOutputStream lockFileStream; + private LogEntry lastEntry; + + private FileLock(FileSystem fs, Path lockFile, FSDataOutputStream lockFileStream, String spoutId) + throws IOException { + this.fs = fs; + this.lockFile = lockFile; + this.lockFileStream = lockFileStream; + this.componentId = spoutId; + logProgress("0", false); + } + + private FileLock(FileSystem fs, Path lockFile, String spoutId, LogEntry entry) + throws IOException { + this.fs = fs; + this.lockFile = lockFile; + this.lockFileStream = fs.append(lockFile); + this.componentId = spoutId; + LOG.info("Acquired abandoned lockFile {}, Spout {}", lockFile, spoutId); + logProgress(entry.fileOffset, true); + } + + /** + * returns lock on file or null if file is already locked. throws if unexpected problem + */ + public static FileLock tryLock(FileSystem fs, Path fileToLock, Path lockDirPath, String spoutId) + throws IOException { + Path lockFile = new Path(lockDirPath, fileToLock.getName()); + + try { + FSDataOutputStream ostream = HdfsUtils.tryCreateFile(fs, lockFile); + if (ostream != null) { + LOG.debug("Acquired lock on file {}. LockFile= {}, Spout = {}", fileToLock, lockFile, spoutId); + return new FileLock(fs, lockFile, ostream, spoutId); + } else { + LOG.debug("Cannot lock file {} as its already locked. Spout = {}", fileToLock, spoutId); + return null; + } + } catch (IOException e) { + LOG.error("Error when acquiring lock on file " + fileToLock + " Spout = " + spoutId, e); + throw e; + } + } + + /** + * checks if lockFile is older than 'olderThan' UTC time by examining the modification time + * on file and (if necessary) the timestamp in last log entry in the file. If its stale, then + * returns the last log entry, else returns null. + * + * @param olderThan time (millis) in UTC. + * @return the last entry in the file if its too old. 
null if last entry is not too old + */ + public static LogEntry getLastEntryIfStale(FileSystem fs, Path lockFile, long olderThan) + throws IOException { + long modifiedTime = fs.getFileStatus(lockFile).getModificationTime(); + if (modifiedTime <= olderThan) { // looks stale + //Impt: HDFS timestamp may not reflect recent appends, so we double check the + // timestamp in last line of file to see when the last update was made + LogEntry lastEntry = getLastEntry(fs, lockFile); + if (lastEntry == null) { + LOG.warn("Empty lock file found. Deleting it. {}", lockFile); + try { + if (!fs.delete(lockFile, false)) { + throw new IOException("Empty lock file deletion failed"); + } + } catch (Exception e) { + LOG.error("Unable to delete empty lock file " + lockFile, e); + } + return null; // nothing to recover from an empty lock file + } + if (lastEntry.eventTime <= olderThan) { + return lastEntry; + } + } + return null; + } + + /** + * Returns the last log entry, or null if the lock file is empty. + */ + public static LogEntry getLastEntry(FileSystem fs, Path lockFile) + throws IOException { + try (FSDataInputStream in = fs.open(lockFile); + BufferedReader reader = new BufferedReader(new InputStreamReader(in))) { + String lastLine = null; + for (String line = reader.readLine(); line != null; line = reader.readLine()) { + lastLine = line; + } + return lastLine == null ? null : LogEntry.deserialize(lastLine); + } + } + + /** + * Takes ownership of the lock file if possible. + * @param lastEntry last entry in the lock file. this param is an optimization: + * we don't scan the lock file again to find its last entry here since + * that has already been done once by the logic used to check if the lock + * file is stale. so this value comes from that earlier scan. + * @param spoutId spout id + * @return null if lock file is not recoverable + * @throws IOException if unable to acquire + */ + public static FileLock takeOwnership(FileSystem fs, Path lockFile, LogEntry lastEntry, String spoutId) + throws IOException { + try { + if (fs instanceof DistributedFileSystem) { + if (!((DistributedFileSystem) fs).recoverLease(lockFile)) { + LOG.warn( + "Unable to recover lease on lock file {} right now. Cannot transfer ownership. Will need to try later. Spout = {}", + lockFile, spoutId); + return null; + } + } + return new FileLock(fs, lockFile, spoutId, lastEntry); + } catch (IOException e) { + if (e instanceof RemoteException + && ((RemoteException) e).unwrapRemoteException() instanceof AlreadyBeingCreatedException) { + LOG.warn( + "Lock file " + lockFile + " is currently open. Cannot transfer ownership now. Will need to try later. Spout = " + + spoutId, + e); + return null; + } else { // unexpected error + LOG.warn("Cannot transfer ownership now for lock file " + lockFile + ". Will need to try later. Spout = " + spoutId, e); + throw e; + } + } + } + + /** + * Finds the oldest expired lock file (using modification timestamp), then takes + * ownership of the lock file. + * Impt: Assumes access to lockFilesDir has been externally synchronized such that + * only one thread is accessing the same lock directory at a time. + */ + public static FileLock acquireOldestExpiredLock(FileSystem fs, Path lockFilesDir, int locktimeoutSec, String spoutId) + throws IOException { + // list files + long now = System.currentTimeMillis(); + long olderThan = now - (locktimeoutSec * 1000L); + Collection<Path> listing = HdfsUtils.listFilesByModificationTime(fs, lockFilesDir, olderThan); + + // locate expired lock files (if any). 
Try to take ownership (oldest lock first) + for (Path file : listing) { + if (file.getName().equalsIgnoreCase(DirLock.DIR_LOCK_FILE)) { + continue; + } + LogEntry lastEntry = getLastEntryIfStale(fs, file, olderThan); + if (lastEntry != null) { + FileLock lock = FileLock.takeOwnership(fs, file, lastEntry, spoutId); + if (lock != null) { + return lock; + } + } + } + if (listing.isEmpty()) { + LOG.debug("No abandoned lock files found by Spout {}", spoutId); + } + return null; + } + + /** + * Finds oldest expired lock file (using modification timestamp), then takes + * ownership of the lock file. + * Impt: Assumes access to lockFilesDir has been externally synchronized such that + * only one thread accessing the same thread + * + * @return a Pair<lock file path, last entry in lock file> .. if expired lock file found + */ + public static HdfsUtils.Pair locateOldestExpiredLock(FileSystem fs, Path lockFilesDir, int locktimeoutSec) + throws IOException { + // list files + long now = System.currentTimeMillis(); + long olderThan = now - (locktimeoutSec * 1000); + Collection listing = HdfsUtils.listFilesByModificationTime(fs, lockFilesDir, olderThan); + + // locate oldest expired lock file (if any) and take ownership + for (Path file : listing) { + if (file.getName().equalsIgnoreCase(DirLock.DIR_LOCK_FILE)) { + continue; + } + LogEntry lastEntry = getLastEntryIfStale(fs, file, olderThan); + if (lastEntry != null) { + return new HdfsUtils.Pair<>(file, lastEntry); + } + } + LOG.debug("No abandoned files found"); + return null; + } + + public void heartbeat(String fileOffset) throws IOException { + logProgress(fileOffset, true); + } + + // new line is at beginning of each line (instead of end) for better recovery from + // partial writes of prior lines + private void logProgress(String fileOffset, boolean prefixNewLine) + throws IOException { + long now = System.currentTimeMillis(); + LogEntry entry = new LogEntry(now, componentId, fileOffset); + String line = entry.toString(); + if (prefixNewLine) { + lockFileStream.writeBytes(System.lineSeparator() + line); + } else { + lockFileStream.writeBytes(line); + } + lockFileStream.hflush(); + + lastEntry = entry; // update this only after writing to hdfs + } + + /** + * Release lock by deleting file. + * @throws IOException if lock file could not be deleted + */ + public void release() throws IOException { + lockFileStream.close(); + if (!fs.delete(lockFile, false)) { + LOG.warn("Unable to delete lock file, Spout = {}", componentId); + throw new IOException("Unable to delete lock file"); + } + LOG.debug("Released lock file {}. Spout {}", lockFile, componentId); + } + + // For testing only.. 
+    // For testing only; invoked via reflection
+    private void forceCloseLockFile() throws IOException {
+        lockFileStream.close();
+    }
+
+    public LogEntry getLastLogEntry() {
+        return lastEntry;
+    }
+
+    public Path getLockFile() {
+        return lockFile;
+    }
+
+    public static class LogEntry {
+        private static final int NUM_FIELDS = 3;
+        public final long eventTime;
+        public final String componentId;
+        public final String fileOffset;
+
+        public LogEntry(long eventtime, String componentId, String fileOffset) {
+            this.eventTime = eventtime;
+            this.componentId = componentId;
+            this.fileOffset = fileOffset;
+        }
+
+        public static LogEntry deserialize(String line) {
+            String[] fields = line.split(",", NUM_FIELDS);
+            return new LogEntry(Long.parseLong(fields[0]), fields[1], fields[2]);
+        }
+
+        @Override
+        public String toString() {
+            return eventTime + "," + componentId + "," + fileOffset;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) {
+                return true;
+            }
+            if (!(o instanceof LogEntry)) {
+                return false;
+            }
+
+            LogEntry logEntry = (LogEntry) o;
+
+            if (eventTime != logEntry.eventTime) {
+                return false;
+            }
+            if (!componentId.equals(logEntry.componentId)) {
+                return false;
+            }
+            return fileOffset.equals(logEntry.fileOffset);
+        }
+
+        @Override
+        public int hashCode() {
+            int result = (int) (eventTime ^ (eventTime >>> 32));
+            result = 31 * result + componentId.hashCode();
+            result = 31 * result + fileOffset.hashCode();
+            return result;
+        }
+    }
+}
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/FileOffset.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/FileOffset.java
new file mode 100644
index 00000000000..a8b354a8dcf
--- /dev/null
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/FileOffset.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.hdfs.spout;
+
+/**
+ * Represents the notion of an offset in a file. The idea is to accommodate representations of a file
+ * offset other than a simple byte offset, since that may be insufficient for certain formats.
+ * The reader for each format implements this as appropriate for its needs.
+ * Note: Derived types must:
+ * - implement equals() and hashCode() appropriately.
+ * - implement Comparable<FileOffset> appropriately.
+ * - implement toString() appropriately for serialization.
+ * - provide a constructor taking a String, for deserialization.
+ */
+
+interface FileOffset extends Comparable<FileOffset>, Cloneable {
+    /**
+     * Tests if rhs == currOffset + 1.
+     */
+    boolean isNextOffset(FileOffset rhs);
+
+    FileOffset clone();
+}
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/FileReader.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/FileReader.java
new file mode 100644
index 00000000000..b6e08f4523b
--- /dev/null
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/FileReader.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.hdfs.spout;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.hadoop.fs.Path;
+
+interface FileReader {
+    Path getFilePath();
+
+    /**
+     * Returns the current position in the file. A simple numeric value may not be
+     * sufficient for certain formats, hence the FileOffset abstraction.
+     */
+    FileOffset getFileOffset();
+
+    /**
+     * Get the next tuple from the file.
+     *
+     * @return null if no more data
+     */
+    List<Object> next() throws IOException, ParseException;
+
+    void close();
+}
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/HdfsSpout.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/HdfsSpout.java
new file mode 100644
index 00000000000..446fb858577
--- /dev/null
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/HdfsSpout.java
@@ -0,0 +1,836 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.hdfs.spout;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.net.URI;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.atomic.AtomicBoolean;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.storm.Config;
+import org.apache.storm.hdfs.common.HdfsUtils;
+import org.apache.storm.hdfs.security.HdfsSecurityUtil;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class HdfsSpout extends BaseRichSpout {
+
+    // other members
+    private static final Logger LOG = LoggerFactory.getLogger(HdfsSpout.class);
+    private final AtomicBoolean commitTimeElapsed = new AtomicBoolean(false);
+    HashMap<MessageId, List<Object>> inflight = new HashMap<>();
+    LinkedBlockingQueue<HdfsUtils.Pair<MessageId, List<Object>>> retryList = new LinkedBlockingQueue<>();
+    HdfsUtils.Pair<Path, FileLock.LogEntry> lastExpiredLock = null;
+    // user configurable
+    private String hdfsUri; // required
+    private String readerType; // required
+    private Fields outputFields; // required
+    private String sourceDir; // required
+    private Path sourceDirPath; // required
+    private String archiveDir; // required
+    private Path archiveDirPath; // required
+    private String badFilesDir; // required
+    private Path badFilesDirPath; // required
+    private String lockDir;
+    private Path lockDirPath;
+    private int commitFrequencyCount = Configs.DEFAULT_COMMIT_FREQ_COUNT;
+    private int commitFrequencySec = Configs.DEFAULT_COMMIT_FREQ_SEC;
+    private int maxOutstanding = Configs.DEFAULT_MAX_OUTSTANDING;
+    private int lockTimeoutSec = Configs.DEFAULT_LOCK_TIMEOUT;
+    private boolean clocksInSync = true;
+    private String inprogressSuffix = ".inprogress"; // not configurable to prevent change between topology restarts
+    private String ignoreSuffix = ".ignore";
+    private String outputStreamName = null;
+    private ProgressTracker tracker = null;
+    private FileSystem hdfs;
+    private FileReader reader;
+    private SpoutOutputCollector collector;
+    private Configuration hdfsConfig;
+    private Map<String, Object> conf = null;
+    private FileLock lock;
+    private String spoutId = null;
+    private long lastExpiredLockTime = 0;
+    private long tupleCounter = 0;
+    private boolean ackEnabled = false;
+    private int acksSinceLastCommit = 0;
+    private Timer commitTimer;
+    private boolean fileReadCompletely = true;
+
+    private String configKey = Configs.DEFAULT_HDFS_CONFIG_KEY; // key for hdfs Kerberos configs
+
+    public HdfsSpout() {
+    }
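For orientation, a minimal sketch of wiring this spout into a topology via the fluent setters that follow (the URI and directory paths are illustrative):

    HdfsSpout spout = new HdfsSpout()
        .setReaderType(Configs.TEXT)
        .setHdfsUri("hdfs://namenode:8020")     // hypothetical namenode
        .setSourceDir("/data/in")
        .setArchiveDir("/data/done")
        .setBadFilesDir("/data/bad")
        .withOutputFields(TextFileReader.defaultFields);
    // then e.g.: topologyBuilder.setSpout("hdfs_spout", spout, 1);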
+    private static String getFileProgress(FileReader reader) {
+        return reader.getFilePath() + " " + reader.getFileOffset();
+    }
+
+    private static void releaseLockAndLog(FileLock fileLock, String spoutId) {
+        try {
+            if (fileLock != null) {
+                fileLock.release();
+                LOG.debug("Released FileLock {}. SpoutId = {}", fileLock.getLockFile(), spoutId);
+            }
+        } catch (IOException e) {
+            LOG.error("Unable to delete lock file : " + fileLock.getLockFile() + " SpoutId = " + spoutId, e);
+        }
+    }
+
+    private static void validateOrMakeDir(FileSystem fs, Path dir, String dirDescription) {
+        try {
+            if (fs.exists(dir)) {
+                if (!fs.isDirectory(dir)) {
+                    LOG.error(dirDescription + " directory is a file, not a dir. " + dir);
+                    throw new RuntimeException(dirDescription + " directory is a file, not a dir. " + dir);
+                }
+            } else if (!fs.mkdirs(dir)) {
+                LOG.error("Unable to create " + dirDescription + " directory " + dir);
+                throw new RuntimeException("Unable to create " + dirDescription + " directory " + dir);
+            }
+        } catch (IOException e) {
+            LOG.error("Unable to create " + dirDescription + " directory " + dir, e);
+            throw new RuntimeException("Unable to create " + dirDescription + " directory " + dir, e);
+        }
+    }
+
+    static void checkValidReader(String readerType) {
+        if (readerType.equalsIgnoreCase(Configs.TEXT) || readerType.equalsIgnoreCase(Configs.SEQ)) {
+            return;
+        }
+        try {
+            Class<?> classType = Class.forName(readerType);
+            if (!FileReader.class.isAssignableFrom(classType)) {
+                LOG.error(readerType + " is not a FileReader");
+                throw new IllegalArgumentException(readerType + " is not a FileReader.");
+            }
+            classType.getConstructor(FileSystem.class, Path.class, Map.class);
+            return;
+        } catch (ClassNotFoundException e) {
+            LOG.error(readerType + " not found in classpath.", e);
+            throw new IllegalArgumentException(readerType + " not found in classpath.", e);
+        } catch (NoSuchMethodException e) {
+            LOG.error(readerType + " is missing the expected constructor for Readers.", e);
+            throw new IllegalArgumentException(readerType + " is missing the expected constructor for Readers.", e);
+        }
+    }
+
+    public HdfsSpout setHdfsUri(String hdfsUri) {
+        this.hdfsUri = hdfsUri;
+        return this;
+    }
+
+    public HdfsSpout setReaderType(String readerType) {
+        this.readerType = readerType;
+        return this;
+    }
+
+    public HdfsSpout setSourceDir(String sourceDir) {
+        this.sourceDir = sourceDir;
+        return this;
+    }
+
+    public HdfsSpout setArchiveDir(String archiveDir) {
+        this.archiveDir = archiveDir;
+        return this;
+    }
+
+    public HdfsSpout setBadFilesDir(String badFilesDir) {
+        this.badFilesDir = badFilesDir;
+        return this;
+    }
+
+    public HdfsSpout setLockDir(String lockDir) {
+        this.lockDir = lockDir;
+        return this;
+    }
+
+    public HdfsSpout setCommitFrequencyCount(int commitFrequencyCount) {
+        this.commitFrequencyCount = commitFrequencyCount;
+        return this;
+    }
+
+    public HdfsSpout setCommitFrequencySec(int commitFrequencySec) {
+        this.commitFrequencySec = commitFrequencySec;
+        return this;
+    }
+
+    public HdfsSpout setMaxOutstanding(int maxOutstanding) {
+        this.maxOutstanding = maxOutstanding;
+        return this;
+    }
+
+    public HdfsSpout setLockTimeoutSec(int lockTimeoutSec) {
+        this.lockTimeoutSec = lockTimeoutSec;
+        return this;
+    }
+
+    public HdfsSpout setClocksInSync(boolean clocksInSync) {
+        this.clocksInSync = clocksInSync;
+        return this;
+    }
+
+    public HdfsSpout setIgnoreSuffix(String ignoreSuffix) {
+        this.ignoreSuffix = ignoreSuffix;
+        return this;
+    }
+
+    /**
+     * Output field names. The number of fields depends upon the reader type.
+     */
+    public HdfsSpout withOutputFields(String... fields) {
+        outputFields = new Fields(fields);
+        return this;
+    }
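Since custom readers are resolved reflectively against the constructor shape checked above, a custom reader is simply a class implementing FileReader with (FileSystem, Path, Map) and (FileSystem, Path, Map, String) constructors. A skeletal sketch that delegates to TextFileReader; the class name is hypothetical, it must live in this package because FileReader is package-private, and it extends AbstractFileReader the same way the bundled readers do:

    public class DelegatingTextReader extends AbstractFileReader {
        private final TextFileReader delegate;

        public DelegatingTextReader(FileSystem fs, Path file, Map conf) throws IOException {
            super(fs, file);
            delegate = new TextFileReader(fs, file, conf);
        }

        public DelegatingTextReader(FileSystem fs, Path file, Map conf, String offset) throws IOException {
            super(fs, file);
            delegate = new TextFileReader(fs, file, conf, offset);
        }

        @Override
        public FileOffset getFileOffset() {
            return delegate.getFileOffset();
        }

        @Override
        public List<Object> next() throws IOException, ParseException {
            return delegate.next();
        }

        @Override
        public void close() {
            delegate.close();
        }
    }

It would then be selected with setReaderType(DelegatingTextReader.class.getName()).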
+    /**
+     * Set the key name under which HDFS options are placed (similar to the HDFS bolt).
+     * The default key name is 'hdfs.config'.
+     */
+    public HdfsSpout withConfigKey(String configKey) {
+        this.configKey = configKey;
+        return this;
+    }
+
+    /**
+     * Set output stream name.
+     */
+    public HdfsSpout withOutputStream(String streamName) {
+        this.outputStreamName = streamName;
+        return this;
+    }
+
+    public Path getLockDirPath() {
+        return lockDirPath;
+    }
+
+    public SpoutOutputCollector getCollector() {
+        return collector;
+    }
+
+    @Override
+    public void nextTuple() {
+        LOG.trace("Next Tuple {}", spoutId);
+        // 1) First re-emit any previously failed tuples (from retryList)
+        if (!retryList.isEmpty()) {
+            LOG.debug("Sending tuple from retry list");
+            HdfsUtils.Pair<MessageId, List<Object>> pair = retryList.remove();
+            emitData(pair.getValue(), pair.getKey());
+            return;
+        }
+
+        if (ackEnabled && tracker.size() >= maxOutstanding) {
+            LOG.warn("Waiting for more ACKs before generating new tuples. "
+                    + "Progress tracker size has reached limit {}, SpoutID {}",
+                    maxOutstanding, spoutId);
+            // Don't emit anything; allow the configured spout wait strategy to kick in
+            return;
+        }
+
+        // 2) If no failed tuples to be retried, then send tuples from hdfs
+        while (true) {
+            try {
+                // 3) Select a new file if one is not open already
+                boolean newReader = false;
+                if (reader == null) {
+                    reader = pickNextFile();
+                    if (reader == null) {
+                        LOG.debug("Currently no new files to process under : " + sourceDirPath);
+                        return;
+                    } else {
+                        fileReadCompletely = false;
+                        newReader = true;
+                    }
+                }
+                if (fileReadCompletely) { // wait for more ACKs before proceeding
+                    return;
+                }
+                // 4) Read record from file, emit to collector and record progress
+                List<Object> tuple = reader.next();
+                if (tuple != null) {
+                    fileReadCompletely = false;
+                    ++tupleCounter;
+                    MessageId msgId = new MessageId(tupleCounter, reader.getFilePath(), reader.getFileOffset());
+                    emitData(tuple, msgId);
+
+                    if (!ackEnabled) {
+                        ++acksSinceLastCommit; // assume message is immediately ACKed in non-ack mode
+                        commitProgress(reader.getFileOffset());
+                    } else {
+                        commitProgress(tracker.getCommitPosition());
+                    }
+                    return;
+                } else {
+                    fileReadCompletely = true;
+                    // if newReader is true and tuple is null then it is an empty reader
+                    if (!ackEnabled || newReader) {
+                        markFileAsDone(reader.getFilePath());
+                    }
+                }
+            } catch (IOException e) {
+                LOG.error("I/O Error processing at file location " + getFileProgress(reader), e);
+                // don't emit anything; allow the configured spout wait strategy to kick in
+                return;
+            } catch (ParseException e) {
+                LOG.error("Parsing error when processing at file location " + getFileProgress(reader)
+                        + ". Skipping remainder of file.", e);
+                markFileAsBad(reader.getFilePath());
+                // Note: We don't return from this method on ParseException to avoid triggering the
+                // spout wait strategy (due to no emits). Instead we go back into the loop and
+                // generate a tuple from the next file
+            }
+        } // while
+    }
+
+    // will commit progress into the lock file if the commit threshold is reached
+    private void commitProgress(FileOffset position) {
+        if (position == null) {
+            return;
+        }
+        if (lock != null && canCommitNow()) {
+            try {
+                String pos = position.toString();
+                lock.heartbeat(pos);
+                LOG.debug("{} Committed progress. {}", spoutId, pos);
+                acksSinceLastCommit = 0;
+                commitTimeElapsed.set(false);
+                setupCommitElapseTimer();
+            } catch (IOException e) {
+                LOG.error("Unable to commit progress. Will retry later. Spout ID = " + spoutId, e);
+            }
+        }
+    }
+
+    private void setupCommitElapseTimer() {
+        if (commitFrequencySec <= 0) {
+            return;
+        }
+        TimerTask timerTask = new TimerTask() {
+            @Override
+            public void run() {
+                commitTimeElapsed.set(true);
+            }
+        };
+        commitTimer.schedule(timerTask, commitFrequencySec * 1000L);
+    }
+
+    private void markFileAsDone(Path filePath) {
+        try {
+            Path newFile = renameCompletedFile(reader.getFilePath());
+            LOG.info("Completed processing {}. Spout Id = {}", newFile, spoutId);
+        } catch (IOException e) {
+            LOG.error("Unable to archive completed file " + filePath + " Spout ID " + spoutId, e);
+        }
+        closeReaderAndResetTrackers();
+    }
+
+    private void markFileAsBad(Path file) {
+        String fileName = file.toString();
+        String fileNameMinusSuffix = fileName.substring(0, fileName.indexOf(inprogressSuffix));
+        String originalName = new Path(fileNameMinusSuffix).getName();
+        Path newFile = new Path(badFilesDirPath + Path.SEPARATOR + originalName);
+
+        LOG.info("Moving bad file {} to {}. Processed it till offset {}. SpoutID= {}", originalName, newFile, tracker.getCommitPosition(),
+                spoutId);
+        try {
+            if (!hdfs.rename(file, newFile)) { // rename can fail by returning false or by throwing an exception
+                throw new IOException("Move failed for bad file: " + file); // convert false return value to an exception
+            }
+        } catch (IOException e) {
+            LOG.warn("Error moving bad file: " + file + " to destination " + newFile + " SpoutId =" + spoutId, e);
+        }
+        closeReaderAndResetTrackers();
+    }
+
+    private void closeReaderAndResetTrackers() {
+        inflight.clear();
+        tracker.offsets.clear();
+        retryList.clear();
+
+        reader.close();
+        reader = null;
+        releaseLockAndLog(lock, spoutId);
+        lock = null;
+    }
+
+    protected void emitData(List<Object> tuple, MessageId id) {
+        LOG.trace("Emitting - {}", id);
+
+        if (outputStreamName == null) {
+            collector.emit(tuple, id);
+        } else {
+            collector.emit(outputStreamName, tuple, id);
+        }
+
+        inflight.put(id, tuple);
+    }
+
+    @SuppressWarnings("deprecation")
+    @Override
+    public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) {
+        LOG.info("Opening HDFS Spout");
+        this.conf = conf;
+        this.commitTimer = new Timer(context.getThisTaskId() + "-commit-timer", true);
+        this.tracker = new ProgressTracker();
+        this.hdfsConfig = new Configuration();
+        this.collector = collector;
+
+        // Hdfs related settings
+        if (this.hdfsUri == null && conf.containsKey(Configs.HDFS_URI)) {
+            this.hdfsUri = conf.get(Configs.HDFS_URI).toString();
+        }
+        if (this.hdfsUri == null) {
+            throw new RuntimeException("HDFS Uri not set on spout");
+        }
+
+        try {
+            this.hdfs = FileSystem.get(URI.create(hdfsUri), hdfsConfig);
+        } catch (IOException e) {
+            LOG.error("Unable to instantiate file system", e);
+            throw new RuntimeException("Unable to instantiate file system", e);
+        }
+
+        if (conf.containsKey(configKey)) {
+            Map<String, Object> map = (Map<String, Object>) conf.get(configKey);
+            if (map != null) {
+                for (String keyName : map.keySet()) {
+                    LOG.info("HDFS Config override : {} = {} ", keyName, String.valueOf(map.get(keyName)));
+                    this.hdfsConfig.set(keyName, String.valueOf(map.get(keyName)));
+                }
+                try {
+                    HdfsSecurityUtil.login(conf, hdfsConfig);
+                } catch (IOException e) {
+                    LOG.error("HDFS Login failed ", e);
+                    throw new RuntimeException(e);
+                }
+            } // if (map != null)
+        }
+
+        // Reader type config
+        if (readerType == null && conf.containsKey(Configs.READER_TYPE)) {
+            readerType = conf.get(Configs.READER_TYPE).toString();
+        }
+        checkValidReader(readerType);
+
+        // -- source dir config
+        if (sourceDir == null &&
conf.containsKey(Configs.SOURCE_DIR)) { + sourceDir = conf.get(Configs.SOURCE_DIR).toString(); + } + if (sourceDir == null) { + LOG.error(Configs.SOURCE_DIR + " setting is required"); + throw new RuntimeException(Configs.SOURCE_DIR + " setting is required"); + } + this.sourceDirPath = new Path(sourceDir); + + // -- archive dir config + if (archiveDir == null && conf.containsKey(Configs.ARCHIVE_DIR)) { + archiveDir = conf.get(Configs.ARCHIVE_DIR).toString(); + } + if (archiveDir == null) { + LOG.error(Configs.ARCHIVE_DIR + " setting is required"); + throw new RuntimeException(Configs.ARCHIVE_DIR + " setting is required"); + } + this.archiveDirPath = new Path(archiveDir); + validateOrMakeDir(hdfs, archiveDirPath, "Archive"); + + // -- bad files dir config + if (badFilesDir == null && conf.containsKey(Configs.BAD_DIR)) { + badFilesDir = conf.get(Configs.BAD_DIR).toString(); + } + if (badFilesDir == null) { + LOG.error(Configs.BAD_DIR + " setting is required"); + throw new RuntimeException(Configs.BAD_DIR + " setting is required"); + } + this.badFilesDirPath = new Path(badFilesDir); + validateOrMakeDir(hdfs, badFilesDirPath, "bad files"); + + // -- ignore file names config + if (conf.containsKey(Configs.IGNORE_SUFFIX)) { + this.ignoreSuffix = conf.get(Configs.IGNORE_SUFFIX).toString(); + } + + // -- lock dir config + if (lockDir == null && conf.containsKey(Configs.LOCK_DIR)) { + lockDir = conf.get(Configs.LOCK_DIR).toString(); + } + if (lockDir == null) { + lockDir = getDefaultLockDir(sourceDirPath); + } + this.lockDirPath = new Path(lockDir); + validateOrMakeDir(hdfs, lockDirPath, "locks"); + + // -- lock timeout + if (conf.get(Configs.LOCK_TIMEOUT) != null) { + this.lockTimeoutSec = Integer.parseInt(conf.get(Configs.LOCK_TIMEOUT).toString()); + } + + // -- enable/disable ACKing + Object ackers = conf.get(Config.TOPOLOGY_ACKER_EXECUTORS); + if (ackers != null) { + int ackerCount = Integer.parseInt(ackers.toString()); + this.ackEnabled = (ackerCount > 0); + LOG.debug("ACKer count = {}", ackerCount); + } else { // ackers==null when ackerCount not explicitly set on the topology + this.ackEnabled = true; + LOG.debug("ACK count not explicitly set on topology."); + } + + LOG.info("ACK mode is {}", ackEnabled ? 
"enabled" : "disabled"); + + // -- commit frequency - count + if (conf.get(Configs.COMMIT_FREQ_COUNT) != null) { + commitFrequencyCount = Integer.parseInt(conf.get(Configs.COMMIT_FREQ_COUNT).toString()); + } + + // -- commit frequency - seconds + if (conf.get(Configs.COMMIT_FREQ_SEC) != null) { + commitFrequencySec = Integer.parseInt(conf.get(Configs.COMMIT_FREQ_SEC).toString()); + if (commitFrequencySec <= 0) { + throw new RuntimeException(Configs.COMMIT_FREQ_SEC + " setting must be greater than 0"); + } + } + + // -- max outstanding tuples + if (conf.get(Configs.MAX_OUTSTANDING) != null) { + maxOutstanding = Integer.parseInt(conf.get(Configs.MAX_OUTSTANDING).toString()); + } + + // -- clocks in sync + if (conf.get(Configs.CLOCKS_INSYNC) != null) { + clocksInSync = Boolean.parseBoolean(conf.get(Configs.CLOCKS_INSYNC).toString()); + } + + // -- spout id + spoutId = context.getThisComponentId(); + + // setup timer for commit elapse time tracking + setupCommitElapseTimer(); + } + + @Override + public void close() { + this.commitTimer.cancel(); + } + + private String getDefaultLockDir(Path sourceDirPath) { + return sourceDirPath.toString() + Path.SEPARATOR + Configs.DEFAULT_LOCK_DIR; + } + + @Override + public void ack(Object msgId) { + LOG.trace("Ack received for msg {} on spout {}", msgId, spoutId); + if (!ackEnabled) { + return; + } + MessageId id = (MessageId) msgId; + inflight.remove(id); + ++acksSinceLastCommit; + tracker.recordAckedOffset(id.offset); + commitProgress(tracker.getCommitPosition()); + if (fileReadCompletely && inflight.isEmpty()) { + markFileAsDone(reader.getFilePath()); + reader = null; + } + super.ack(msgId); + } + + private boolean canCommitNow() { + + if (commitFrequencyCount > 0 && acksSinceLastCommit >= commitFrequencyCount) { + return true; + } + return commitTimeElapsed.get(); + } + + @Override + public void fail(Object msgId) { + LOG.trace("Fail received for msg id {} on spout {}", msgId, spoutId); + super.fail(msgId); + if (ackEnabled) { + HdfsUtils.Pair> item = HdfsUtils.Pair.of(msgId, inflight.remove(msgId)); + retryList.add(item); + } + } + + private FileReader pickNextFile() { + try { + // 1) If there are any abandoned files, pick oldest one + lock = getOldestExpiredLock(); + if (lock != null) { + LOG.debug("Spout {} now took over ownership of abandoned FileLock {}", spoutId, lock.getLockFile()); + Path file = getFileForLockFile(lock.getLockFile(), sourceDirPath); + String resumeFromOffset = lock.getLastLogEntry().fileOffset; + LOG.info("Resuming processing of abandoned file : {}", file); + return createFileReader(file, resumeFromOffset); + } + + // 2) If no abandoned files, then pick oldest file in sourceDirPath, lock it and rename it + Collection listing = HdfsUtils.listFilesByModificationTime(hdfs, sourceDirPath, 0); + + for (Path file : listing) { + if (file.getName().endsWith(inprogressSuffix)) { + continue; + } + if (file.getName().endsWith(ignoreSuffix)) { + continue; + } + lock = FileLock.tryLock(hdfs, file, lockDirPath, spoutId); + if (lock == null) { + LOG.debug("Unable to get FileLock for {}, so skipping it.", file); + continue; // could not lock, so try another file. 
+                }
+                try {
+                    Path newFile = renameToInProgressFile(file);
+                    FileReader result = createFileReader(newFile);
+                    LOG.info("Processing : {} ", file);
+                    return result;
+                } catch (Exception e) {
+                    LOG.error("Skipping file " + file, e);
+                    releaseLockAndLog(lock, spoutId);
+                    continue;
+                }
+            }
+
+            return null;
+        } catch (IOException e) {
+            LOG.error("Unable to select next file for consumption " + sourceDirPath, e);
+            return null;
+        }
+    }
+
+    /**
+     * If clocks are in sync, simply acquires the oldest expired lock.
+     * Otherwise, on the first call this just notes the oldest expired lock; on a subsequent
+     * call it checks whether that lock has since been updated, and acquires it only if it has not.
+     *
+     * @return a lock object, or null if none could be acquired
+     */
+    private FileLock getOldestExpiredLock() throws IOException {
+        // 1 - acquire lock on dir
+        DirLock dirlock = DirLock.tryLock(hdfs, lockDirPath);
+        if (dirlock == null) {
+            dirlock = DirLock.takeOwnershipIfStale(hdfs, lockDirPath, lockTimeoutSec);
+            if (dirlock == null) {
+                LOG.debug("Spout {} could not take over ownership of DirLock for {}", spoutId, lockDirPath);
+                return null;
+            }
+            LOG.debug("Spout {} now took over ownership of abandoned DirLock for {}", spoutId, lockDirPath);
+        } else {
+            LOG.debug("Spout {} now owns DirLock for {}", spoutId, lockDirPath);
+        }
+
+        try {
+            // 2 - if clocks are in sync then simply take ownership of the oldest expired lock
+            if (clocksInSync) {
+                return FileLock.acquireOldestExpiredLock(hdfs, lockDirPath, lockTimeoutSec, spoutId);
+            }
+
+            // 3 - if clocks are not in sync ..
+            if (lastExpiredLock == null) {
+                // just make a note of the oldest expired lock now and check if it's still unmodified after lockTimeoutSec
+                lastExpiredLock = FileLock.locateOldestExpiredLock(hdfs, lockDirPath, lockTimeoutSec);
+                lastExpiredLockTime = System.currentTimeMillis();
+                return null;
+            }
+            // wait more if lockTimeoutSec has not yet elapsed since we noted the candidate lock
+            if (!hasLockTimeoutElapsed(lastExpiredLockTime)) {
+                return null;
+            }
+
+            // if the lock file is unchanged since we noted it, it is abandoned; take ownership
+            FileLock.LogEntry lastEntry = FileLock.getLastEntry(hdfs, lastExpiredLock.getKey());
+            if (lastEntry.equals(lastExpiredLock.getValue())) {
+                FileLock result = FileLock.takeOwnership(hdfs, lastExpiredLock.getKey(), lastEntry, spoutId);
+                lastExpiredLock = null;
+                return result;
+            } else {
+                // the lock file has been updated since last time, so leave this lock file alone
+                lastExpiredLock = null;
+                return null;
+            }
+        } finally {
+            dirlock.release();
+            LOG.debug("Released DirLock {}, SpoutID {} ", dirlock.getLockFile(), spoutId);
+        }
+    }
+
+    private boolean hasLockTimeoutElapsed(long lastModifyTime) {
+        return (System.currentTimeMillis() - lastModifyTime) >= lockTimeoutSec * 1000L;
+    }
+
+    /**
+     * Creates a reader that reads from the beginning of the file.
+     *
+     * @param file file to read
+     */
+    private FileReader createFileReader(Path file)
+            throws IOException {
+        if (readerType.equalsIgnoreCase(Configs.SEQ)) {
+            return new SequenceFileReader(this.hdfs, file, conf);
+        }
+        if (readerType.equalsIgnoreCase(Configs.TEXT)) {
+            return new TextFileReader(this.hdfs, file, conf);
+        }
+        try {
+            Class<?> clsType = Class.forName(readerType);
+            Constructor<?> constructor = clsType.getConstructor(FileSystem.class, Path.class, Map.class);
+            return (FileReader) constructor.newInstance(this.hdfs, file, conf);
+        } catch (Exception e) {
+            LOG.error(e.getMessage(), e);
+            throw new RuntimeException("Unable to instantiate " + readerType + " reader", e);
+        }
+    }
+    /**
+     * Creates a reader that starts reading from 'offset'.
+     *
+     * @param file the file to read
+     * @param offset the offset string; it should be understandable by the reader type being used
+     */
+    private FileReader createFileReader(Path file, String offset)
+            throws IOException {
+        if (readerType.equalsIgnoreCase(Configs.SEQ)) {
+            return new SequenceFileReader(this.hdfs, file, conf, offset);
+        }
+        if (readerType.equalsIgnoreCase(Configs.TEXT)) {
+            return new TextFileReader(this.hdfs, file, conf, offset);
+        }
+
+        try {
+            Class<?> clsType = Class.forName(readerType);
+            Constructor<?> constructor = clsType.getConstructor(FileSystem.class, Path.class, Map.class, String.class);
+            return (FileReader) constructor.newInstance(this.hdfs, file, conf, offset);
+        } catch (Exception e) {
+            LOG.error(e.getMessage(), e);
+            throw new RuntimeException("Unable to instantiate " + readerType, e);
+        }
+    }
+
+    /**
+     * Renames a file with the .inprogress suffix.
+     *
+     * @return path of the renamed file
+     * @throws IOException if the rename fails
+     */
+    private Path renameToInProgressFile(Path file)
+            throws IOException {
+        Path newFile = new Path(file.toString() + inprogressSuffix);
+        try {
+            if (hdfs.rename(file, newFile)) {
+                return newFile;
+            }
+            throw new RenameException(file, newFile);
+        } catch (IOException e) {
+            throw new RenameException(file, newFile, e);
+        }
+    }
+
+    /**
+     * Returns the corresponding input file in 'sourceDirPath' for the specified lock file.
+     * If no such file is found then returns null.
+     */
+    private Path getFileForLockFile(Path lockFile, Path sourceDirPath)
+            throws IOException {
+        String lockFileName = lockFile.getName();
+        Path dataFile = new Path(sourceDirPath + Path.SEPARATOR + lockFileName + inprogressSuffix);
+        if (hdfs.exists(dataFile)) {
+            return dataFile;
+        }
+        dataFile = new Path(sourceDirPath + Path.SEPARATOR + lockFileName);
+        if (hdfs.exists(dataFile)) {
+            return dataFile;
+        }
+        return null;
+    }
+
+    // renames the file and returns the new file path
+    private Path renameCompletedFile(Path file) throws IOException {
+        String fileName = file.toString();
+        String fileNameMinusSuffix = fileName.substring(0, fileName.indexOf(inprogressSuffix));
+        String newName = new Path(fileNameMinusSuffix).getName();
+
+        Path newFile = new Path(archiveDirPath + Path.SEPARATOR + newName);
+        LOG.info("Completed consuming file {}", fileNameMinusSuffix);
+        if (!hdfs.rename(file, newFile)) {
+            throw new IOException("Rename failed for file: " + file);
+        }
+        LOG.debug("Renamed file {} to {} ", file, newFile);
+        return newFile;
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        if (outputStreamName != null) {
+            declarer.declareStream(outputStreamName, outputFields);
+        } else {
+            declarer.declare(outputFields);
+        }
+    }
+
+    static class MessageId implements Comparable<MessageId> {
+
+        public long msgNumber; // tracks order in which msg came in
+        public String fullPath;
+        public FileOffset offset;
+
+        MessageId(long msgNumber, Path fullPath, FileOffset offset) {
+            this.msgNumber = msgNumber;
+            this.fullPath = fullPath.toString();
+            this.offset = offset;
+        }
+
+        @Override
+        public String toString() {
+            return "{'" + fullPath + "':" + offset + "}";
+        }
+
+        @Override
+        public int compareTo(MessageId rhs) {
+            return Long.compare(msgNumber, rhs.msgNumber);
+        }
+    }
+
+    private static class RenameException extends IOException {
+
+        public final Path oldFile;
+        public final Path newFile;
+
+        RenameException(Path oldFile, Path newFile) {
+            super("Rename of " + oldFile + " to " + newFile + " failed");
this.oldFile = oldFile; + this.newFile = newFile; + } + + RenameException(Path oldFile, Path newFile, IOException cause) { + super("Rename of " + oldFile + " to " + newFile + " failed", cause); + this.oldFile = oldFile; + this.newFile = newFile; + } + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/ParseException.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/ParseException.java new file mode 100644 index 00000000000..a7845ea24c2 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/ParseException.java @@ -0,0 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.spout; + +public class ParseException extends Exception { + public ParseException(String message, Throwable cause) { + super(message, cause); + } + +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/ProgressTracker.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/ProgressTracker.java new file mode 100644 index 00000000000..93a9b093009 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/ProgressTracker.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.hdfs.spout;
+
+import java.io.PrintStream;
+import java.util.TreeSet;
+
+public class ProgressTracker {
+
+    TreeSet<FileOffset> offsets = new TreeSet<>();
+
+    public synchronized void recordAckedOffset(FileOffset newOffset) {
+        if (newOffset == null) {
+            return;
+        }
+        offsets.add(newOffset);
+
+        FileOffset currHead = offsets.first();
+
+        if (currHead.isNextOffset(newOffset)) { // check is a minor optimization
+            trimHead();
+        }
+    }
+
+    // remove contiguous elements from the head of the set,
+    // e.g.: 1,2,3,4,10,11,12,15 => 4,10,11,12,15
+    private synchronized void trimHead() {
+        if (offsets.size() <= 1) {
+            return;
+        }
+        FileOffset head = offsets.first();
+        FileOffset head2 = offsets.higher(head);
+        if (head.isNextOffset(head2)) {
+            offsets.pollFirst();
+            trimHead();
+        }
+    }
+
+    public synchronized FileOffset getCommitPosition() {
+        if (!offsets.isEmpty()) {
+            return offsets.first().clone();
+        }
+        return null;
+    }
+
+    public synchronized void dumpState(PrintStream stream) {
+        stream.println(offsets);
+    }
+
+    public synchronized int size() {
+        return offsets.size();
+    }
+}
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/SequenceFileReader.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/SequenceFileReader.java
new file mode 100644
index 00000000000..d245df7eaa8
--- /dev/null
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/SequenceFileReader.java
@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.hdfs.spout;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SequenceFileReader<KeyT extends Writable, ValueT extends Writable> extends AbstractFileReader {
+    public static final String[] defaultFields = { "key", "value" };
+    public static final String BUFFER_SIZE = "hdfsspout.reader.buffer.bytes";
+    private static final Logger LOG = LoggerFactory
+            .getLogger(SequenceFileReader.class);
+    private static final int DEFAULT_BUFF_SIZE = 4096;
+    private final SequenceFile.Reader reader;
+
+    private final SequenceFileReader.Offset offset;
+
+    private final KeyT key;
+    private final ValueT value;
+
+    public SequenceFileReader(FileSystem fs, Path file, Map conf)
+            throws IOException {
+        super(fs, file);
+        int bufferSize = !conf.containsKey(BUFFER_SIZE) ? DEFAULT_BUFF_SIZE : Integer.parseInt(conf.get(BUFFER_SIZE).toString());
+        this.reader = new SequenceFile.Reader(fs.getConf(), SequenceFile.Reader.file(file), SequenceFile.Reader.bufferSize(bufferSize));
+        this.key = (KeyT) ReflectionUtils.newInstance(reader.getKeyClass(), fs.getConf());
+        this.value = (ValueT) ReflectionUtils.newInstance(reader.getValueClass(), fs.getConf());
+        this.offset = new SequenceFileReader.Offset(0, 0, 0);
+    }
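For orientation, a minimal read loop (a sketch: fs and conf are assumed to be in scope, the path and the Text/IntWritable key and value types are illustrative, and exception handling is elided):

    SequenceFileReader<Text, IntWritable> reader =
            new SequenceFileReader<>(fs, new Path("/data/in/part-00000"), conf);
    List<Object> kv;
    while ((kv = reader.next()) != null) {
        Text key = (Text) kv.get(0);                  // element 0 is the key
        IntWritable value = (IntWritable) kv.get(1);  // element 1 is the value
        String checkpoint = reader.getFileOffset().toString();  // persist for recovery
    }
    reader.close();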
+    public SequenceFileReader(FileSystem fs, Path file, Map conf, String offset)
+            throws IOException {
+        super(fs, file);
+        int bufferSize = !conf.containsKey(BUFFER_SIZE) ? DEFAULT_BUFF_SIZE : Integer.parseInt(conf.get(BUFFER_SIZE).toString());
+        this.offset = new SequenceFileReader.Offset(offset);
+        this.reader = new SequenceFile.Reader(fs.getConf(), SequenceFile.Reader.file(file), SequenceFile.Reader.bufferSize(bufferSize));
+        this.key = (KeyT) ReflectionUtils.newInstance(reader.getKeyClass(), fs.getConf());
+        this.value = (ValueT) ReflectionUtils.newInstance(reader.getValueClass(), fs.getConf());
+        skipToOffset(this.reader, this.offset, this.key);
+    }
+
+    private static <K extends Writable> void skipToOffset(SequenceFile.Reader reader, Offset offset, K key) throws IOException {
+        reader.sync(offset.lastSyncPoint);
+        for (int i = 0; i < offset.recordsSinceLastSync; ++i) {
+            reader.next(key);
+        }
+    }
+
+    @Override
+    public List<Object> next() throws IOException, ParseException {
+        if (reader.next(key, value)) {
+            List<Object> result = new ArrayList<>(2);
+            Collections.addAll(result, key, value);
+            offset.increment(reader.syncSeen(), reader.getPosition());
+            return result;
+        }
+        return null;
+    }
+
+    @Override
+    public void close() {
+        try {
+            reader.close();
+        } catch (IOException e) {
+            LOG.warn("Ignoring error when closing file " + getFilePath(), e);
+        }
+    }
+
+    @Override
+    public Offset getFileOffset() {
+        return offset;
+    }
+
+    public static class Offset implements FileOffset {
+        public long lastSyncPoint;
+        public long recordsSinceLastSync;
+        public long currentRecord;
+        private long currRecordEndOffset;
+        private long prevRecordEndOffset;
+
+        public Offset(long lastSyncPoint, long recordsSinceLastSync, long currentRecord) {
+            this(lastSyncPoint, recordsSinceLastSync, currentRecord, 0, 0);
+        }
+
+        public Offset(long lastSyncPoint,
+                long recordsSinceLastSync,
+                long currentRecord,
+                long currRecordEndOffset,
+                long prevRecordEndOffset) {
+            this.lastSyncPoint = lastSyncPoint;
+            this.recordsSinceLastSync = recordsSinceLastSync;
+            this.currentRecord = currentRecord;
+            this.prevRecordEndOffset = prevRecordEndOffset;
+            this.currRecordEndOffset = currRecordEndOffset;
+        }
+        public Offset(String offset) {
+            try {
+                if (offset == null) {
+                    throw new IllegalArgumentException("offset cannot be null");
+                }
+                if (offset.equalsIgnoreCase("0")) {
+                    this.lastSyncPoint = 0;
+                    this.recordsSinceLastSync = 0;
+                    this.currentRecord = 0;
+                    this.prevRecordEndOffset = 0;
+                    this.currRecordEndOffset = 0;
+                } else {
+                    String[] parts = offset.split(":");
+                    this.lastSyncPoint = Long.parseLong(parts[0].split("=")[1]);
+                    this.recordsSinceLastSync = Long.parseLong(parts[1].split("=")[1]);
+                    this.currentRecord = Long.parseLong(parts[2].split("=")[1]);
+                    this.prevRecordEndOffset = 0;
+                    this.currRecordEndOffset = 0;
+                }
+            } catch (Exception e) {
+                throw new IllegalArgumentException("'" + offset
+                        + "' cannot be interpreted. It is not in expected format for SequenceFileReader."
+                        + " Format e.g. {sync=123:afterSync=345:record=67}");
+            }
+        }
+
+        @Override
+        public String toString() {
+            return '{'
+                    + "sync=" + lastSyncPoint
+                    + ":afterSync=" + recordsSinceLastSync
+                    + ":record=" + currentRecord
+                    + ":}";
+        }
+
+        @Override
+        public boolean isNextOffset(FileOffset rhs) {
+            if (rhs instanceof Offset) {
+                Offset other = ((Offset) rhs);
+                // per the FileOffset contract, rhs must be exactly the next record
+                return other.currentRecord == currentRecord + 1;
+            }
+            return false;
+        }
+
+        @Override
+        public int compareTo(FileOffset o) {
+            Offset rhs = ((Offset) o);
+            return Long.compare(currentRecord, rhs.currentRecord);
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) {
+                return true;
+            }
+            if (!(o instanceof Offset)) {
+                return false;
+            }
+
+            Offset offset = (Offset) o;
+
+            return currentRecord == offset.currentRecord;
+        }
+
+        @Override
+        public int hashCode() {
+            return (int) (currentRecord ^ (currentRecord >>> 32));
+        }
+
+        void increment(boolean syncSeen, long newBytePosition) {
+            if (!syncSeen) {
+                ++recordsSinceLastSync;
+            } else {
+                recordsSinceLastSync = 1;
+                lastSyncPoint = prevRecordEndOffset;
+            }
+            ++currentRecord;
+            prevRecordEndOffset = currRecordEndOffset;
+            currRecordEndOffset = newBytePosition; // remember where this record ends
+        }
+
+        @Override
+        public Offset clone() {
+            return new Offset(lastSyncPoint, recordsSinceLastSync, currentRecord, currRecordEndOffset, prevRecordEndOffset);
+        }
+
+    } //class Offset
+} //class
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/TextFileReader.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/TextFileReader.java
new file mode 100644
index 00000000000..0b3da9c2625
--- /dev/null
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/TextFileReader.java
@@ -0,0 +1,192 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.hdfs.spout;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+// Todo: Track file offsets instead of line number
+public class TextFileReader extends AbstractFileReader {
+    public static final String[] defaultFields = { "line" };
+    public static final String CHARSET = "hdfsspout.reader.charset";
+    public static final String BUFFER_SIZE = "hdfsspout.reader.buffer.bytes";
+
+    private static final int DEFAULT_BUFF_SIZE = 4096;
+    private static final Logger LOG = LoggerFactory.getLogger(TextFileReader.class);
+    private BufferedReader reader;
+    private TextFileReader.Offset offset;
+
+    public TextFileReader(FileSystem fs, Path file, Map conf) throws IOException {
+        this(fs, file, conf, new TextFileReader.Offset(0, 0));
+    }
+
+    public TextFileReader(FileSystem fs, Path file, Map conf, String startOffset) throws IOException {
+        this(fs, file, conf, new TextFileReader.Offset(startOffset));
+    }
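A minimal usage sketch (the file path is illustrative; fs and conf are assumed to be in scope): resume from a persisted offset string and read line tuples. Accepted start offsets are "0" or the "{char=<n>:line=<n>}" form parsed by Offset(String) below:

    TextFileReader reader = new TextFileReader(fs, new Path("/data/in/events.txt"), conf, "{char=123:line=5}");
    List<Object> tuple;
    while ((tuple = reader.next()) != null) {
        String line = (String) tuple.get(0);                    // single "line" field
        String checkpoint = reader.getFileOffset().toString();  // e.g. "{char=180:line=6:}"
    }
    reader.close();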
+    private TextFileReader(FileSystem fs, Path file, Map conf, TextFileReader.Offset startOffset)
+            throws IOException {
+        super(fs, file);
+        offset = startOffset;
+        FSDataInputStream in = fs.open(file);
+
+        String charSet = (conf == null || !conf.containsKey(CHARSET)) ? "UTF-8" : conf.get(CHARSET).toString();
+        int buffSz =
+                (conf == null || !conf.containsKey(BUFFER_SIZE)) ? DEFAULT_BUFF_SIZE : Integer.parseInt(conf.get(BUFFER_SIZE).toString());
+        reader = new BufferedReader(new InputStreamReader(in, charSet), buffSz);
+        if (offset.charOffset > 0) {
+            reader.skip(offset.charOffset);
+        }
+    }
+
+    @Override
+    public Offset getFileOffset() {
+        return offset.clone();
+    }
+
+    @Override
+    public List<Object> next() throws IOException, ParseException {
+        String line = readLineAndTrackOffset(reader);
+        if (line != null) {
+            return Collections.singletonList((Object) line);
+        }
+        return null;
+    }
+
+    private String readLineAndTrackOffset(BufferedReader reader) throws IOException {
+        StringBuilder sb = new StringBuilder(1000);
+        long before = offset.charOffset;
+        int ch;
+        while ((ch = reader.read()) != -1) {
+            ++offset.charOffset;
+            if (ch == '\n') {
+                ++offset.lineNumber;
+                return sb.toString();
+            } else if (ch != '\r') {
+                sb.append((char) ch);
+            }
+        }
+        if (before == offset.charOffset) { // reached EOF, didn't read anything
+            return null;
+        }
+        return sb.toString();
+    }
+
+    @Override
+    public void close() {
+        try {
+            reader.close();
+        } catch (IOException e) {
+            LOG.warn("Ignoring error when closing file " + getFilePath(), e);
+        }
+    }
+
+    public static class Offset implements FileOffset {
+        long charOffset;
+        long lineNumber;
+
+        public Offset(long charOffset, long lineNumber) {
+            this.charOffset = charOffset;
+            this.lineNumber = lineNumber;
+        }
+
+        public Offset(String offset) {
+            if (offset == null) {
+                throw new IllegalArgumentException("offset cannot be null");
+            }
+            try {
+                if (offset.equalsIgnoreCase("0")) {
+                    this.charOffset = 0;
+                    this.lineNumber = 0;
+                } else {
+                    String[] parts = offset.split(":");
+                    this.charOffset = Long.parseLong(parts[0].split("=")[1]);
+                    this.lineNumber = Long.parseLong(parts[1].split("=")[1]);
+                }
+            } catch (Exception e) {
+                throw new IllegalArgumentException("'" + offset
+                        + "' cannot be interpreted. It is not in expected format for TextFileReader."
+                        + " Format e.g.
{char=123:line=5}"); + } + } + + @Override + public String toString() { + return '{' + + "char=" + charOffset + + ":line=" + lineNumber + + ":}"; + } + + @Override + public boolean isNextOffset(FileOffset rhs) { + if (rhs instanceof Offset) { + Offset other = ((Offset) rhs); + return other.charOffset > charOffset + && other.lineNumber == lineNumber + 1; + } + return false; + } + + @Override + public int compareTo(FileOffset o) { + Offset rhs = ((Offset) o); + if (lineNumber < rhs.lineNumber) { + return -1; + } + if (lineNumber == rhs.lineNumber) { + return 0; + } + return 1; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Offset)) { + return false; + } + + Offset that = (Offset) o; + + if (charOffset != that.charOffset) { + return false; + } + return lineNumber == that.lineNumber; + } + + @Override + public int hashCode() { + int result = (int) (charOffset ^ (charOffset >>> 32)); + result = 31 * result + (int) (lineNumber ^ (lineNumber >>> 32)); + return result; + } + + @Override + public Offset clone() { + return new Offset(charOffset, lineNumber); + } + } //class Offset +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsState.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsState.java new file mode 100644 index 00000000000..118a113a204 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsState.java @@ -0,0 +1,556 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.storm.hdfs.trident; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.io.Serializable; +import java.net.URI; +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.compress.CompressionCodecFactory; +import org.apache.storm.Config; +import org.apache.storm.hdfs.common.rotation.RotationAction; +import org.apache.storm.hdfs.security.HdfsSecurityUtil; +import org.apache.storm.hdfs.trident.format.FileNameFormat; +import org.apache.storm.hdfs.trident.format.RecordFormat; +import org.apache.storm.hdfs.trident.format.SequenceFormat; +import org.apache.storm.hdfs.trident.rotation.FileRotationPolicy; +import org.apache.storm.hdfs.trident.rotation.FileSizeRotationPolicy; +import org.apache.storm.hdfs.trident.rotation.TimedRotationPolicy; +import org.apache.storm.task.IMetricsContext; +import org.apache.storm.topology.FailedException; +import org.apache.storm.trident.operation.TridentCollector; +import org.apache.storm.trident.state.State; +import org.apache.storm.trident.tuple.TridentTuple; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class HdfsState implements State { + + public static final Logger LOG = LoggerFactory.getLogger(HdfsState.class); + private Options options; + private volatile TxnRecord lastSeenTxn; + private Path indexFilePath; + + + HdfsState(Options options) { + this.options = options; + } + + void prepare(Map conf, IMetricsContext metrics, int partitionIndex, int numPartitions) { + this.options.prepare(conf, partitionIndex, numPartitions); + initLastTxn(conf, partitionIndex); + } + + private TxnRecord readTxnRecord(Path path) throws IOException { + FSDataInputStream inputStream = null; + try { + inputStream = this.options.fs.open(path); + BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream)); + String line; + if ((line = reader.readLine()) != null) { + String[] fields = line.split(","); + return new TxnRecord(Long.valueOf(fields[0]), fields[1], Long.valueOf(fields[2])); + } + } finally { + if (inputStream != null) { + inputStream.close(); + } + } + return new TxnRecord(0, options.currentFile.toString(), 0); + } + + /** + * Returns temp file path corresponding to a file name. + */ + private Path tmpFilePath(String filename) { + return new Path(filename + ".tmp"); + } + + /** + * Reads the last txn record from index file if it exists, if not from .tmp file if exists. 
+    /**
+     * Reads the last txn record from the index file if it exists; otherwise from the .tmp file, if that exists.
+     *
+     * @param indexFilePath the index file path
+     * @return the txn record from the index file or a default initial record
+     */
+    private TxnRecord getTxnRecord(Path indexFilePath) throws IOException {
+        Path tmpPath = tmpFilePath(indexFilePath.toString());
+        if (this.options.fs.exists(indexFilePath)) {
+            return readTxnRecord(indexFilePath);
+        } else if (this.options.fs.exists(tmpPath)) {
+            return readTxnRecord(tmpPath);
+        }
+        return new TxnRecord(0, options.currentFile.toString(), 0);
+    }
+
+    private void initLastTxn(Map conf, int partition) {
+        // include the partition id in the file name so that the index for each partition is independent
+        String indexFileName = String.format(".index.%s.%d", conf.get(Config.TOPOLOGY_NAME), partition);
+        this.indexFilePath = new Path(options.fileNameFormat.getPath(), indexFileName);
+        try {
+            this.lastSeenTxn = getTxnRecord(indexFilePath);
+            LOG.debug("initLastTxn updated lastSeenTxn to [{}]", this.lastSeenTxn);
+        } catch (IOException e) {
+            LOG.warn("initLastTxn failed due to IOException.", e);
+            throw new RuntimeException(e);
+        }
+    }
+
+    private void updateIndex(long txId) {
+        LOG.debug("Starting index update.");
+        final Path tmpPath = tmpFilePath(indexFilePath.toString());
+
+        try (FSDataOutputStream out = this.options.fs.create(tmpPath, true);
+                BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(out))) {
+            TxnRecord txnRecord = new TxnRecord(txId, options.currentFile.toString(), this.options.getCurrentOffset());
+            bw.write(txnRecord.toString());
+            bw.newLine();
+            bw.flush();
+            out.close(); /* In non error scenarios, for the Azure Data Lake Store File System (adl://),
+                            the output stream must be closed before the file associated with it is deleted.
+                            For ADLFS deleting the file also removes any handles to the file, hence out.close() will fail. */
+            /*
+             * Delete the current index file and rename the tmp file to atomically
+             * replace the index file. Orphan .tmp files are handled in getTxnRecord.
+             */
+            options.fs.delete(this.indexFilePath, false);
+            options.fs.rename(tmpPath, this.indexFilePath);
+            lastSeenTxn = txnRecord;
+            LOG.debug("updateIndex updated lastSeenTxn to [{}]", this.lastSeenTxn);
+        } catch (IOException e) {
+            LOG.warn("Begin commit failed due to IOException. Failing batch", e);
+            throw new FailedException(e);
+        }
+    }
+
+    @Override
+    public void beginCommit(Long txId) {
+        if (txId <= lastSeenTxn.txnid) {
+            LOG.info("txID {} is already processed, lastSeenTxn {}. Triggering recovery.", txId, lastSeenTxn);
+            long start = System.currentTimeMillis();
+            options.recover(lastSeenTxn.dataFilePath, lastSeenTxn.offset);
+            LOG.info("Recovery took {} ms.", System.currentTimeMillis() - start);
+        }
+        updateIndex(txId);
+    }
+
+    @Override
+    public void commit(Long txId) {
+        try {
+            options.doCommit(txId);
+        } catch (IOException e) {
+            LOG.warn("Commit failed due to IOException. Failing the batch.", e);
+            throw new FailedException(e);
+        }
+    }
+
+    public void updateState(List<TridentTuple> tuples, TridentCollector tridentCollector) {
+        try {
+            this.options.execute(tuples);
+        } catch (IOException e) {
+            LOG.warn("Failing batch due to IOException.", e);
+            throw new FailedException(e);
+        }
+    }
+
+    /**
+     * For unit tests.
+ */ + void close() throws IOException { + this.options.closeOutputFile(); + } + + public abstract static class Options implements Serializable { + + protected String fsUrl; + protected String configKey; + protected transient FileSystem fs; + protected FileRotationPolicy rotationPolicy; + protected FileNameFormat fileNameFormat; + protected int rotation = 0; + protected transient Configuration hdfsConfig; + protected ArrayList rotationActions = new ArrayList(); + private Path currentFile; + + abstract void closeOutputFile() throws IOException; + + abstract Path createOutputFile() throws IOException; + + abstract void execute(List tuples) throws IOException; + + abstract void doPrepare(Map conf, int partitionIndex, int numPartitions) throws IOException; + + abstract long getCurrentOffset() throws IOException; + + abstract void doCommit(Long txId) throws IOException; + + abstract void doRecover(Path srcPath, long numberOfBytes) throws Exception; + + protected void rotateOutputFile(boolean doRotateAction) throws IOException { + LOG.info("Rotating output file..."); + @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance") + long start = System.currentTimeMillis(); + closeOutputFile(); + this.rotation++; + Path newFile = createOutputFile(); + if (doRotateAction) { + LOG.info("Performing {} file rotation actions.", this.rotationActions.size()); + for (RotationAction action : this.rotationActions) { + action.execute(this.fs, this.currentFile); + } + } + this.currentFile = newFile; + long time = System.currentTimeMillis() - start; + LOG.info("File rotation took {} ms.", time); + } + + protected void rotateOutputFile() throws IOException { + rotateOutputFile(true); + } + + + void prepare(Map conf, int partitionIndex, int numPartitions) { + if (this.rotationPolicy == null) { + throw new IllegalStateException("RotationPolicy must be specified."); + } else if (this.rotationPolicy instanceof FileSizeRotationPolicy) { + long rotationBytes = ((FileSizeRotationPolicy) rotationPolicy).getMaxBytes(); + LOG.warn("FileSizeRotationPolicy specified with {} bytes.", rotationBytes); + LOG.warn("Recovery will fail if data files cannot be copied within topology.message.timeout.secs."); + LOG.warn("Ensure that the data files does not grow too big with the FileSizeRotationPolicy."); + } else if (this.rotationPolicy instanceof TimedRotationPolicy) { + LOG.warn("TimedRotationPolicy specified with interval {} ms.", ((TimedRotationPolicy) rotationPolicy).getInterval()); + LOG.warn("Recovery will fail if data files cannot be copied within topology.message.timeout.secs."); + LOG.warn("Ensure that the data files does not grow too big with the TimedRotationPolicy."); + } + if (this.fsUrl == null) { + throw new IllegalStateException("File system URL must be specified."); + } + this.fileNameFormat.prepare(conf, partitionIndex, numPartitions); + this.hdfsConfig = new Configuration(); + Map map = (Map) conf.get(this.configKey); + if (map != null) { + for (String key : map.keySet()) { + this.hdfsConfig.set(key, String.valueOf(map.get(key))); + } + } + try { + HdfsSecurityUtil.login(conf, hdfsConfig); + doPrepare(conf, partitionIndex, numPartitions); + this.currentFile = createOutputFile(); + + } catch (Exception e) { + throw new RuntimeException("Error preparing HdfsState: " + e.getMessage(), e); + } + + rotationPolicy.start(); + } + + /** + * Recovers nBytes from srcFile to the new file created by calling rotateOutputFile and then deletes the srcFile. 
+ */ + private void recover(String srcFile, long numberOfBytes) { + try { + Path srcPath = new Path(srcFile); + rotateOutputFile(false); + this.rotationPolicy.reset(); + if (numberOfBytes > 0) { + doRecover(srcPath, numberOfBytes); + LOG.info("Recovered {} bytes from {} to {}", numberOfBytes, srcFile, currentFile); + } else { + LOG.info("Nothing to recover from {}", srcFile); + } + fs.delete(srcPath, false); + LOG.info("Deleted file {} that had partial commits.", srcFile); + } catch (Exception e) { + LOG.warn("Recovery failed.", e); + throw new RuntimeException(e); + } + } + + } + + public static class HdfsFileOptions extends Options { + + protected RecordFormat format; + private transient FSDataOutputStream out; + private long offset = 0; + private int bufferSize = 131072; // default 128 K + + public HdfsFileOptions withFsUrl(String fsUrl) { + this.fsUrl = fsUrl; + return this; + } + + public HdfsFileOptions withConfigKey(String configKey) { + this.configKey = configKey; + return this; + } + + public HdfsFileOptions withFileNameFormat(FileNameFormat fileNameFormat) { + this.fileNameFormat = fileNameFormat; + return this; + } + + public HdfsFileOptions withRecordFormat(RecordFormat format) { + this.format = format; + return this; + } + + public HdfsFileOptions withRotationPolicy(FileRotationPolicy rotationPolicy) { + this.rotationPolicy = rotationPolicy; + return this; + } + + /** + *

Set the size of the buffer used for HDFS file copy in case of recovery. The default
+         * value is 131072 bytes (128 KB).
+         *
+         * Note: the lower limit for the parameter is 4096, below which the
+         * option is ignored.
+         *
+         * @param sizeInBytes the buffer size in bytes
+         * @return {@link HdfsFileOptions}
+         */
+        public HdfsFileOptions withBufferSize(int sizeInBytes) {
+            this.bufferSize = Math.max(4096, sizeInBytes); // at least 4 KB
+            return this;
+        }
+
+        @Deprecated
+        public HdfsFileOptions addRotationAction(RotationAction action) {
+            this.rotationActions.add(action);
+            return this;
+        }
+
+        @Override
+        void doPrepare(Map<String, Object> conf, int partitionIndex, int numPartitions) throws IOException {
+            LOG.info("Preparing HDFS File state...");
+            this.fs = FileSystem.get(URI.create(this.fsUrl), hdfsConfig);
+        }
+
+        @Override
+        public long getCurrentOffset() {
+            return offset;
+        }
+
+        @Override
+        public void doCommit(Long txId) throws IOException {
+            if (this.rotationPolicy.mark(this.offset)) {
+                rotateOutputFile();
+                this.offset = 0;
+                this.rotationPolicy.reset();
+            } else {
+                if (this.out instanceof HdfsDataOutputStream) {
+                    ((HdfsDataOutputStream) this.out).hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
+                } else {
+                    this.out.hsync();
+                }
+            }
+        }
+
+        @Override
+        void doRecover(Path srcPath, long numberOfBytes) throws IOException {
+            this.offset = 0;
+            FSDataInputStream is = this.fs.open(srcPath);
+            copyBytes(is, out, numberOfBytes);
+            this.offset = numberOfBytes;
+        }
+
+        private void copyBytes(FSDataInputStream is, FSDataOutputStream out, long bytesToCopy) throws IOException {
+            byte[] buf = new byte[bufferSize];
+            int n;
+            // Copy at most bytesToCopy bytes; the last chunk is truncated so that
+            // anything beyond the recorded offset (a partial commit) is dropped.
+            while (bytesToCopy > 0 && (n = is.read(buf)) != -1) {
+                out.write(buf, 0, (int) Math.min(n, bytesToCopy));
+                bytesToCopy -= n;
+            }
+        }
+
+        @Override
+        void closeOutputFile() throws IOException {
+            this.out.close();
+        }
+
+        @Override
+        Path createOutputFile() throws IOException {
+            Path path = new Path(this.fileNameFormat.getPath(), this.fileNameFormat.getName(this.rotation, System.currentTimeMillis()));
+            this.out = this.fs.create(path);
+            return path;
+        }
+
+        @Override
+        public void execute(List<TridentTuple> tuples) throws IOException {
+            for (TridentTuple tuple : tuples) {
+                byte[] bytes = this.format.format(tuple);
+                out.write(bytes);
+                this.offset += bytes.length;
+            }
+        }
+    }
+
+    public static class SequenceFileOptions extends Options {
+        private SequenceFormat format;
+        private SequenceFile.CompressionType compressionType = SequenceFile.CompressionType.RECORD;
+        private transient SequenceFile.Writer writer;
+        private String compressionCodec = "default";
+        private transient CompressionCodecFactory codecFactory;
+
+        public SequenceFileOptions withCompressionCodec(String codec) {
+            this.compressionCodec = codec;
+            return this;
+        }
+
+        public SequenceFileOptions withFsUrl(String fsUrl) {
+            this.fsUrl = fsUrl;
+            return this;
+        }
+
+        public SequenceFileOptions withConfigKey(String configKey) {
+            this.configKey = configKey;
+            return this;
+        }
+
+        public SequenceFileOptions withFileNameFormat(FileNameFormat fileNameFormat) {
+            this.fileNameFormat = fileNameFormat;
+            return this;
+        }
+
+        public SequenceFileOptions withSequenceFormat(SequenceFormat format) {
+            this.format = format;
+            return this;
+        }
+
+        public SequenceFileOptions withRotationPolicy(FileRotationPolicy rotationPolicy) {
+            this.rotationPolicy = rotationPolicy;
+            return this;
+        }
+
+        public SequenceFileOptions withCompressionType(SequenceFile.CompressionType compressionType) {
+            this.compressionType = compressionType;
+            return this;
+        }
+
+        public SequenceFileOptions addRotationAction(RotationAction action) {
+            this.rotationActions.add(action);
+            return this;
+        }
+
+        @Override
+        void doPrepare(Map<String, Object> conf, int partitionIndex, int numPartitions) throws IOException {
+            LOG.info("Preparing Sequence File State...");
+            if (this.format == null) {
+                throw new IllegalStateException("SequenceFormat must be specified.");
+            }
+
+            this.fs = FileSystem.get(URI.create(this.fsUrl), hdfsConfig);
+            this.codecFactory = new CompressionCodecFactory(hdfsConfig);
+        }
+
+        @Override
+        public long getCurrentOffset() throws IOException {
+            return this.writer.getLength();
+        }
+
+        @Override
+        public void doCommit(Long txId) throws IOException {
+            if (this.rotationPolicy.mark(this.writer.getLength())) {
+                rotateOutputFile();
+                this.rotationPolicy.reset();
+            } else {
+                this.writer.hsync();
+            }
+        }
+
+        @Override
+        void doRecover(Path srcPath, long numberOfBytes) throws Exception {
+            SequenceFile.Reader reader = new SequenceFile.Reader(this.hdfsConfig,
+                                                                 SequenceFile.Reader.file(srcPath),
+                                                                 SequenceFile.Reader.length(numberOfBytes));
+
+            Writable key = (Writable) this.format.keyClass().newInstance();
+            Writable value = (Writable) this.format.valueClass().newInstance();
+            while (reader.next(key, value)) {
+                this.writer.append(key, value);
+            }
+        }
+
+        @Override
+        Path createOutputFile() throws IOException {
+            Path p = new Path(this.fsUrl + this.fileNameFormat.getPath(),
+                              this.fileNameFormat.getName(this.rotation, System.currentTimeMillis()));
+            this.writer = SequenceFile.createWriter(
+                this.hdfsConfig,
+                SequenceFile.Writer.file(p),
+                SequenceFile.Writer.keyClass(this.format.keyClass()),
+                SequenceFile.Writer.valueClass(this.format.valueClass()),
+                SequenceFile.Writer.compression(this.compressionType, this.codecFactory.getCodecByName(this.compressionCodec))
+            );
+            return p;
+        }
+
+        @Override
+        void closeOutputFile() throws IOException {
+            this.writer.close();
+        }
+
+        @Override
+        public void execute(List<TridentTuple> tuples) throws IOException {
+            for (TridentTuple tuple : tuples) {
+                this.writer.append(this.format.key(tuple), this.format.value(tuple));
+            }
+        }
+    }
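A minimal configuration sketch for the sequence-file variant above, assuming a NameNode at hdfs://localhost:8020 and tuples carrying a long "key" field and a string "sentence" field:

    HdfsState.Options seqOpts = new HdfsState.SequenceFileOptions()
            .withFileNameFormat(new DefaultFileNameFormat().withPath("/trident/seq").withExtension(".seq"))
            .withSequenceFormat(new DefaultSequenceFormat("key", "sentence"))
            .withRotationPolicy(new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB))
            .withCompressionType(SequenceFile.CompressionType.RECORD)
            .withCompressionCodec("default")
            .withFsUrl("hdfs://localhost:8020");

Note two asymmetries with HdfsFileOptions: createOutputFile() here prepends fsUrl to the file name format's path, and getCurrentOffset() is read from the SequenceFile writer's length rather than from a byte counter maintained by execute().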

+    /**
+     * TxnRecord [txnid, data_file_path, data_file_offset].
+     *
+     * This is written to the index file during beginCommit() and used for recovery.
+     */
+    private static class TxnRecord {
+        private long txnid;
+        private String dataFilePath;
+        private long offset;
+
+        private TxnRecord(long txnId, String dataFilePath, long offset) {
+            this.txnid = txnId;
+            this.dataFilePath = dataFilePath;
+            this.offset = offset;
+        }
+
+        @Override
+        public String toString() {
+            return Long.toString(txnid) + "," + dataFilePath + "," + Long.toString(offset);
+        }
+    }
+}
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsStateFactory.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsStateFactory.java
new file mode 100644
index 00000000000..568f8bc9310
--- /dev/null
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsStateFactory.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.hdfs.trident;
+
+import java.util.Map;
+import org.apache.storm.task.IMetricsContext;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.state.StateFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class HdfsStateFactory implements StateFactory {
+    private static final Logger LOG = LoggerFactory.getLogger(HdfsStateFactory.class);
+    private HdfsState.Options options;
+
+    public HdfsStateFactory() {}
+
+    public HdfsStateFactory withOptions(HdfsState.Options options) {
+        this.options = options;
+        return this;
+    }
+
+    @Override
+    public State makeState(Map conf, IMetricsContext metrics, int partitionIndex, int numPartitions) {
+        LOG.info("makeState(partitionIndex={}, numPartitions={})", partitionIndex, numPartitions);
+        HdfsState state = new HdfsState(this.options);
+        state.prepare(conf, metrics, partitionIndex, numPartitions);
+        return state;
+    }
+}
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsUpdater.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsUpdater.java
new file mode 100644
index 00000000000..a63bb40c0e8
--- /dev/null
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsUpdater.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.hdfs.trident;
+
+import java.util.List;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.BaseStateUpdater;
+import org.apache.storm.trident.tuple.TridentTuple;
+
+public class HdfsUpdater extends BaseStateUpdater<HdfsState> {
+    @Override
+    public void updateState(HdfsState state, List<TridentTuple> tuples, TridentCollector collector) {
+        state.updateState(tuples, collector);
+    }
+}
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DefaultFileNameFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DefaultFileNameFormat.java
new file mode 100644
index 00000000000..e48e1986700
--- /dev/null
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DefaultFileNameFormat.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.hdfs.trident.format;
+
+import java.util.Map;
+
+
+/**
+ * Creates file names with the following format:
+ *

+ *     {prefix}-{partitionId}-{rotationNum}-{timestamp}{extension}
+ * 
+ * For example: + *
+ *     MyBolt-5-7-1390579837830.txt
+ * 
+ * + *

By default, prefix is empty and extension is ".txt".
+ *
+ */
+public class DefaultFileNameFormat implements FileNameFormat {
+    private int partitionIndex;
+    private String path = "/storm";
+    private String prefix = "";
+    private String extension = ".txt";
+
+    /**
+     * Overrides the default prefix.
+     */
+    public DefaultFileNameFormat withPrefix(String prefix) {
+        this.prefix = prefix;
+        return this;
+    }
+
+    /**
+     * Overrides the default file extension.
+     */
+    public DefaultFileNameFormat withExtension(String extension) {
+        this.extension = extension;
+        return this;
+    }
+
+    public DefaultFileNameFormat withPath(String path) {
+        this.path = path;
+        return this;
+    }
+
+    @Override
+    public void prepare(Map conf, int partitionIndex, int numPartitions) {
+        this.partitionIndex = partitionIndex;
+    }
+
+    @Override
+    public String getName(long rotation, long timeStamp) {
+        return this.prefix + "-" + this.partitionIndex + "-" + rotation + "-" + timeStamp + this.extension;
+    }
+
+    @Override
+    public String getPath() {
+        return this.path;
+    }
+}
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DefaultSequenceFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DefaultSequenceFormat.java
new file mode 100644
index 00000000000..f33c03083cb
--- /dev/null
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DefaultSequenceFormat.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.hdfs.trident.format;
+
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.storm.trident.tuple.TridentTuple;
+
+/**
+ * Basic SequenceFormat implementation that uses
+ * LongWritable for keys and Text for values.
+ */ +public class DefaultSequenceFormat implements SequenceFormat { + private transient LongWritable key; + private transient Text value; + + private String keyField; + private String valueField; + + public DefaultSequenceFormat(String keyField, String valueField) { + this.keyField = keyField; + this.valueField = valueField; + } + + + @Override + public Class keyClass() { + return LongWritable.class; + } + + @Override + public Class valueClass() { + return Text.class; + } + + @Override + public Writable key(TridentTuple tuple) { + if (this.key == null) { + this.key = new LongWritable(); + } + this.key.set(tuple.getLongByField(this.keyField)); + return this.key; + } + + @Override + public Writable value(TridentTuple tuple) { + if (this.value == null) { + this.value = new Text(); + } + this.value.set(tuple.getStringByField(this.valueField)); + return this.value; + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DelimitedRecordFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DelimitedRecordFormat.java new file mode 100644 index 00000000000..c12b478f964 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DelimitedRecordFormat.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.trident.format; + +import org.apache.storm.trident.tuple.TridentTuple; +import org.apache.storm.tuple.Fields; + +/** + * RecordFormat implementation that uses field and record delimiters. + * By default uses a comma (",") as the field delimiter and a + * newline ("\n") as the record delimiter. + */ +public class DelimitedRecordFormat implements RecordFormat { + public static final String DEFAULT_FIELD_DELIMITER = ","; + public static final String DEFAULT_RECORD_DELIMITER = "\n"; + private String fieldDelimiter = DEFAULT_FIELD_DELIMITER; + private String recordDelimiter = DEFAULT_RECORD_DELIMITER; + private Fields fields = null; + + /** + * Only output the specified fields. + */ + public DelimitedRecordFormat withFields(Fields fields) { + this.fields = fields; + return this; + } + + /** + * Overrides the default field delimiter. + */ + public DelimitedRecordFormat withFieldDelimiter(String delimiter) { + this.fieldDelimiter = delimiter; + return this; + } + + /** + * Overrides the default record delimiter. 
+ */ + public DelimitedRecordFormat withRecordDelimiter(String delimiter) { + this.recordDelimiter = delimiter; + return this; + } + + @Override + public byte[] format(TridentTuple tuple) { + StringBuilder sb = new StringBuilder(); + int size = this.fields.size(); + for (int i = 0; i < size; i++) { + sb.append(tuple.getValueByField(fields.get(i))); + if (i != size - 1) { + sb.append(this.fieldDelimiter); + } + } + sb.append(this.recordDelimiter); + return sb.toString().getBytes(); + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/FileNameFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/FileNameFormat.java new file mode 100644 index 00000000000..0b7ac464fb3 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/FileNameFormat.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.trident.format; + +import java.io.Serializable; +import java.util.Map; + +/** + * Formatter interface for determining HDFS file names. + */ +public interface FileNameFormat extends Serializable { + + void prepare(Map conf, int partitionIndex, int numPartitions); + + /** + * Returns the filename the HdfsBolt will create. + * @param rotation the current file rotation number (incremented on every rotation) + * @param timeStamp current time in milliseconds when the rotation occurs + */ + String getName(long rotation, long timeStamp); + + String getPath(); +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/RecordFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/RecordFormat.java new file mode 100644 index 00000000000..b2f2cc3dab3 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/RecordFormat.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
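As a worked example of format() above, with assumed field values:

    RecordFormat format = new DelimitedRecordFormat()
            .withFields(new Fields("city", "temp"))
            .withFieldDelimiter("|");
    // For a TridentTuple with city = "SFO" and temp = 42 this yields the
    // bytes of "SFO|42\n": fields joined by the field delimiter, then the
    // record delimiter appended.
    byte[] record = format.format(tuple);  // tuple is an assumed TridentTuple

Note that withFields() must be called before format() is used; the fields member has no default value.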
+ */ + +package org.apache.storm.hdfs.trident.format; + +import java.io.Serializable; +import org.apache.storm.trident.tuple.TridentTuple; + +/** + * Formats a Tuple object into a byte array that will be written to HDFS. + */ +public interface RecordFormat extends Serializable { + byte[] format(TridentTuple tuple); +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/SequenceFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/SequenceFormat.java new file mode 100644 index 00000000000..815bf2f1f1b --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/SequenceFormat.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.hdfs.trident.format; + +import java.io.Serializable; +import org.apache.hadoop.io.Writable; +import org.apache.storm.trident.tuple.TridentTuple; + +/** + * Interface for converting TridentTuple objects to HDFS sequence file key-value pairs. + */ +public interface SequenceFormat extends Serializable { + /** + * Key class used by implementation (e.g. IntWritable.class, etc.). + */ + Class keyClass(); + + /** + * Value class used by implementation (e.g. Text.class, etc.). + */ + Class valueClass(); + + /** + * Given a tuple, return the key that should be written to the sequence file. + */ + Writable key(TridentTuple tuple); + + /** + * Given a tuple, return the value that should be written to the sequence file. + */ + Writable value(TridentTuple tuple); +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/SimpleFileNameFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/SimpleFileNameFormat.java new file mode 100644 index 00000000000..889c60b661e --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/SimpleFileNameFormat.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +package org.apache.storm.hdfs.trident.format; + +import java.net.UnknownHostException; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.Map; +import org.apache.storm.utils.Utils; + +public class SimpleFileNameFormat implements FileNameFormat { + + private static final long serialVersionUID = 1L; + + private int partitionIndex; + private String host; + private String path = "/storm"; + private String name = "$TIME.$NUM.txt"; + private String timeFormat = "yyyyMMddHHmmss"; + + @Override + public String getName(long rotation, long timeStamp) { + // compile parameters + SimpleDateFormat dateFormat = new SimpleDateFormat(timeFormat); + String ret = name + .replace("$TIME", dateFormat.format(new Date(timeStamp))) + .replace("$NUM", String.valueOf(rotation)) + .replace("$HOST", host) + .replace("$PARTITION", String.valueOf(partitionIndex)); + return ret; + } + + @Override + public String getPath() { + return path; + } + + @SuppressWarnings("unchecked") + @Override + public void prepare(Map conf, int partitionIndex, int numPartitions) { + this.partitionIndex = partitionIndex; + try { + this.host = Utils.localHostname(); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + } + + public SimpleFileNameFormat withPath(String path) { + this.path = path; + return this; + } + + /** + * support parameters:
+ * $TIME - current time. use withTimeFormat to format.
+ * $NUM - rotation number
+ * $HOST - local host name
+ * $PARTITION - partition index
+ * + * @param name file name + */ + public SimpleFileNameFormat withName(String name) { + this.name = name; + return this; + } + + public SimpleFileNameFormat withTimeFormat(String timeFormat) { + //check format + try { + new SimpleDateFormat(timeFormat); + } catch (Exception e) { + throw new IllegalArgumentException("invalid timeFormat: " + e.getMessage()); + } + this.timeFormat = timeFormat; + return this; + } + +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileRotationPolicy.java new file mode 100644 index 00000000000..a2a593246f8 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileRotationPolicy.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.trident.rotation; + +import java.io.Serializable; +import org.apache.storm.trident.tuple.TridentTuple; + +/** + * Used by the HdfsBolt to decide when to rotate files. + * + *
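A usage sketch for the SimpleFileNameFormat name parameters listed above; the path, pattern, and time format are illustrative:

    FileNameFormat nameFormat = new SimpleFileNameFormat()
            .withPath("/trident")
            .withName("$TIME.$HOST.$PARTITION.$NUM.txt")
            .withTimeFormat("yyyyMMdd-HHmmss");
    // For partition 3 at rotation 7, getName() yields something like
    // "20240101-093000.my-host.3.7.txt" under /trident.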

The HdfsBolt will call the mark() method for every
+ * tuple received. If the mark() method returns
+ * true, the HdfsBolt will perform a file rotation.
+ *
+ *

After file rotation, the HdfsBolt will call the reset() + * method. + */ +public interface FileRotationPolicy extends Serializable { + /** + * Called for every tuple the HdfsBolt executes. + * + * @param tuple The tuple executed. + * @param offset current offset of file being written + * @return true if a file rotation should be performed + */ + boolean mark(TridentTuple tuple, long offset); + + /** + * Check if a file rotation should be performed based on + * the offset at which file is being written. + * + * @param offset the current offset of file being written + * @return true if a file rotation should be performed. + */ + boolean mark(long offset); + + /** + * Called after the HdfsBolt rotates a file. + * + */ + void reset(); + + /** + * Start the policy. Useful in case of policies like timed rotation + * where the timer can be started. + */ + void start(); +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileSizeRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileSizeRotationPolicy.java new file mode 100644 index 00000000000..f9631b706e2 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileSizeRotationPolicy.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.trident.rotation; + +import org.apache.storm.trident.tuple.TridentTuple; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * File rotation policy that will rotate files when a certain + * file size is reached. + * + *

For example: + *

+ *     // rotate when files reach 5MB
+ *     FileSizeRotationPolicy policy =
+ *          new FileSizeRotationPolicy(5.0f, Units.MB);
+ * 
+ */ +public class FileSizeRotationPolicy implements FileRotationPolicy { + private static final Logger LOG = LoggerFactory.getLogger(FileSizeRotationPolicy.class); + private long maxBytes; + private long lastOffset = 0; + private long currentBytesWritten = 0; + + public FileSizeRotationPolicy(float count, Units units) { + this.maxBytes = (long) (count * units.getByteCount()); + } + + @Override + public boolean mark(TridentTuple tuple, long offset) { + return mark(offset); + } + + @Override + public boolean mark(long offset) { + long diff = offset - this.lastOffset; + this.currentBytesWritten += diff; + this.lastOffset = offset; + return this.currentBytesWritten >= this.maxBytes; + } + + @Override + public void reset() { + this.currentBytesWritten = 0; + this.lastOffset = 0; + } + + @Override + public void start() { + + } + + public long getMaxBytes() { + return maxBytes; + } + + public enum Units { + + KB((long) Math.pow(2, 10)), + MB((long) Math.pow(2, 20)), + GB((long) Math.pow(2, 30)), + TB((long) Math.pow(2, 40)); + + private long byteCount; + + Units(long byteCount) { + this.byteCount = byteCount; + } + + public long getByteCount() { + return byteCount; + } + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/NoRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/NoRotationPolicy.java new file mode 100644 index 00000000000..f6fedb943a3 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/NoRotationPolicy.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.trident.rotation; + +import org.apache.storm.trident.tuple.TridentTuple; + +/** + * File rotation policy that will never rotate... + * Just one big file. Intended for testing purposes. + */ +public class NoRotationPolicy implements FileRotationPolicy { + @Override + public boolean mark(TridentTuple tuple, long offset) { + return false; + } + + @Override + public boolean mark(long offset) { + return false; + } + + @Override + public void reset() { + } + + @Override + public void start() { + + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/TimedRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/TimedRotationPolicy.java new file mode 100644 index 00000000000..5f4475cbe0b --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/TimedRotationPolicy.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
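The mark()/reset()/start() contract above also admits simple custom policies. A hedged sketch of a record-count policy; the class is hypothetical and not part of this patch:

    import org.apache.storm.hdfs.trident.rotation.FileRotationPolicy;
    import org.apache.storm.trident.tuple.TridentTuple;

    /** Rotates after a fixed number of mark() calls. */
    public class CountRotationPolicy implements FileRotationPolicy {
        private final long maxRecords;
        private long count = 0;

        public CountRotationPolicy(long maxRecords) {
            this.maxRecords = maxRecords;
        }

        @Override
        public boolean mark(TridentTuple tuple, long offset) {
            return mark(offset);
        }

        @Override
        public boolean mark(long offset) {
            // Count invocations rather than bytes; the offset is ignored.
            return ++count >= maxRecords;
        }

        @Override
        public void reset() {
            count = 0;
        }

        @Override
        public void start() {
            // Nothing to schedule for a count-based policy.
        }
    }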
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.storm.hdfs.trident.rotation; + +import java.util.Timer; +import java.util.TimerTask; +import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.storm.trident.tuple.TridentTuple; + +public class TimedRotationPolicy implements FileRotationPolicy { + + private long interval; + private Timer rotationTimer; + private AtomicBoolean rotationTimerTriggered = new AtomicBoolean(); + + public TimedRotationPolicy(float count, TimeUnit units) { + this.interval = (long) (count * units.getMilliSeconds()); + } + + /** + * Called for every tuple the HdfsBolt executes. + * + * @param tuple The tuple executed. + * @param offset current offset of file being written + * @return true if a file rotation should be performed + */ + @Override + public boolean mark(TridentTuple tuple, long offset) { + return rotationTimerTriggered.get(); + } + + @Override + public boolean mark(long offset) { + return rotationTimerTriggered.get(); + } + + /** + * Called after the HdfsBolt rotates a file. + */ + @Override + public void reset() { + rotationTimerTriggered.set(false); + } + + public long getInterval() { + return this.interval; + } + + /** + * Start the timer to run at fixed intervals. + */ + @Override + public void start() { + rotationTimer = new Timer(true); + TimerTask task = new TimerTask() { + @Override + public void run() { + rotationTimerTriggered.set(true); + } + }; + rotationTimer.scheduleAtFixedRate(task, interval, interval); + } + + public enum TimeUnit { + + SECONDS((long) 1000), + MINUTES((long) 1000 * 60), + HOURS((long) 1000 * 60 * 60), + DAYS((long) 1000 * 60 * 60 * 24); + + private long milliSeconds; + + TimeUnit(long milliSeconds) { + this.milliSeconds = milliSeconds; + } + + public long getMilliSeconds() { + return milliSeconds; + } + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/sync/CountSyncPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/sync/CountSyncPolicy.java new file mode 100644 index 00000000000..f98dbdf8a40 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/sync/CountSyncPolicy.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
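Configuring the timed policy above is a one-liner; as start() shows, a daemon Timer flips rotationTimerTriggered at a fixed rate and every mark() call simply reads that flag:

    FileRotationPolicy policy =
            new TimedRotationPolicy(30.0f, TimedRotationPolicy.TimeUnit.MINUTES);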
+ */ + +package org.apache.storm.hdfs.trident.sync; + +import org.apache.storm.trident.tuple.TridentTuple; + +/** + * SyncPolicy implementation that will trigger a + * file system sync after a certain number of tuples + * have been processed. + */ +public class CountSyncPolicy implements SyncPolicy { + private int count; + private int executeCount = 0; + + public CountSyncPolicy(int count) { + this.count = count; + } + + @Override + public boolean mark(TridentTuple tuple, long offset) { + this.executeCount++; + return this.executeCount >= this.count; + } + + @Override + public void reset() { + this.executeCount = 0; + } +} diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/sync/SyncPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/sync/SyncPolicy.java new file mode 100644 index 00000000000..ba397248907 --- /dev/null +++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/sync/SyncPolicy.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.trident.sync; + +import java.io.Serializable; +import org.apache.storm.trident.tuple.TridentTuple; + +/** + * Interface for controlling when the HdfsBolt + * syncs and flushes the filesystem. + * + */ +public interface SyncPolicy extends Serializable { + /** + * Called for every tuple the HdfsBolt executes. + * + * @param tuple The tuple executed. + * @param offset current offset for the file being written + * @return true if a sync should be performed + */ + boolean mark(TridentTuple tuple, long offset); + + + /** + * Called after the HdfsBolt performs a sync. + * + */ + void reset(); + +} diff --git a/external/storm-hdfs/src/main/resources/META-INF/services/org.apache.storm.validation.Validated b/external/storm-hdfs/src/main/resources/META-INF/services/org.apache.storm.validation.Validated new file mode 100644 index 00000000000..18c242e3e7a --- /dev/null +++ b/external/storm-hdfs/src/main/resources/META-INF/services/org.apache.storm.validation.Validated @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
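A typical use of CountSyncPolicy above is to bound the amount of unsynced data to a fixed number of tuples, for example:

    SyncPolicy syncPolicy = new CountSyncPolicy(1000);
    // mark() returns true on every 1000th tuple; the caller then syncs the
    // file system and invokes reset() to restart the count.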
+# See the License for the specific language governing permissions and +# limitations under the License. + +org.apache.storm.hdfs.spout.Configs diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/avro/TestFixedAvroSerializer.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/avro/TestFixedAvroSerializer.java new file mode 100644 index 00000000000..0fd27fdab76 --- /dev/null +++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/avro/TestFixedAvroSerializer.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.avro; + +import org.apache.avro.Schema; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; + +public class TestFixedAvroSerializer { + //These should match FixedAvroSerializer.config in the test resources + private static final String schemaString1 = "{\"type\":\"record\"," + + "\"name\":\"stormtest1\"," + + "\"fields\":[{\"name\":\"foo1\",\"type\":\"string\"}," + + "{ \"name\":\"int1\", \"type\":\"int\" }]}"; + private static final String schemaString2 = "{\"type\":\"record\"," + + "\"name\":\"stormtest2\"," + + "\"fields\":[{\"name\":\"foobar1\",\"type\":\"string\"}," + + "{ \"name\":\"intint1\", \"type\":\"int\" }]}"; + private static Schema schema1; + private static Schema schema2; + + final AvroSchemaRegistry reg; + + public TestFixedAvroSerializer() throws Exception { + reg = new FixedAvroSerializer(); + } + + @BeforeAll + public static void setupClass() { + + Schema.Parser parser = new Schema.Parser(); + schema1 = parser.parse(schemaString1); + + parser = new Schema.Parser(); + schema2 = parser.parse(schemaString2); + } + + @Test + public void testSchemas() { + testTheSchema(schema1); + testTheSchema(schema2); + } + + @Test + public void testDifferentFPs() { + String fp1 = reg.getFingerprint(schema1); + String fp2 = reg.getFingerprint(schema2); + + assertNotEquals(fp1, fp2); + } + + private void testTheSchema(Schema schema) { + String fp1 = reg.getFingerprint(schema); + Schema found = reg.getSchema(fp1); + String fp2 = reg.getFingerprint(found); + + assertEquals(found, schema); + assertEquals(fp1, fp2); + } +} diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/avro/TestGenericAvroSerializer.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/avro/TestGenericAvroSerializer.java new file mode 100644 index 00000000000..20435bc5126 --- /dev/null +++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/avro/TestGenericAvroSerializer.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.avro; + +import org.apache.avro.Schema; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; + +public class TestGenericAvroSerializer { + private static final String schemaString1 = "{\"type\":\"record\"," + + "\"name\":\"stormtest1\"," + + "\"fields\":[{\"name\":\"foo1\",\"type\":\"string\"}," + + "{ \"name\":\"int1\", \"type\":\"int\" }]}"; + private static final String schemaString2 = "{\"type\":\"record\"," + + "\"name\":\"stormtest2\"," + + "\"fields\":[{\"name\":\"foobar1\",\"type\":\"string\"}," + + "{ \"name\":\"intint1\", \"type\":\"int\" }]}"; + private static Schema schema1; + private static Schema schema2; + + AvroSchemaRegistry reg = new GenericAvroSerializer(); + + @BeforeAll + public static void setupClass() { + + Schema.Parser parser = new Schema.Parser(); + schema1 = parser.parse(schemaString1); + + parser = new Schema.Parser(); + schema2 = parser.parse(schemaString2); + } + + @Test + public void testSchemas() { + testTheSchema(schema1); + testTheSchema(schema2); + } + + @Test + public void testDifferentFPs() { + String fp1 = reg.getFingerprint(schema1); + String fp2 = reg.getFingerprint(schema2); + + assertNotEquals(fp1, fp2); + } + + private void testTheSchema(Schema schema) { + String fp1 = reg.getFingerprint(schema); + Schema found = reg.getSchema(fp1); + String fp2 = reg.getFingerprint(found); + + assertEquals(found, schema); + assertEquals(fp1, fp2); + } +} diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/AvroGenericRecordBoltTest.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/AvroGenericRecordBoltTest.java new file mode 100644 index 00000000000..bcd31e47f7e --- /dev/null +++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/AvroGenericRecordBoltTest.java @@ -0,0 +1,271 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +package org.apache.storm.hdfs.bolt; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.util.HashMap; +import org.apache.avro.Schema; +import org.apache.avro.file.DataFileReader; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.generic.GenericRecordBuilder; +import org.apache.avro.io.DatumReader; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.storm.Config; +import org.apache.storm.hdfs.bolt.format.DefaultFileNameFormat; +import org.apache.storm.hdfs.bolt.format.FileNameFormat; +import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; +import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; +import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; +import org.apache.storm.hdfs.bolt.sync.SyncPolicy; +import org.apache.storm.hdfs.testing.MiniDFSClusterExtension; +import org.apache.storm.task.GeneralTopologyContext; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.TupleImpl; +import org.apache.storm.tuple.Values; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +@ExtendWith(MockitoExtension.class) +public class AvroGenericRecordBoltTest { + + private static final String testRoot = "/unittest"; + private static final String schemaV1 = "{\"type\":\"record\"," + + "\"name\":\"myrecord\"," + + "\"fields\":[{\"name\":\"foo1\",\"type\":\"string\"}," + + "{ \"name\":\"int1\", \"type\":\"int\" }]}"; + private static final String schemaV2 = "{\"type\":\"record\"," + + "\"name\":\"myrecord\"," + + "\"fields\":[{\"name\":\"foo1\",\"type\":\"string\"}," + + "{ \"name\":\"bar\", \"type\":\"string\", \"default\":\"baz\" }," + + "{ \"name\":\"int1\", \"type\":\"int\" }]}"; + private static Schema schema1; + private static Schema schema2; + private static Tuple tuple1; + private static Tuple tuple2; + + @RegisterExtension + public static final MiniDFSClusterExtension DFS_CLUSTER_EXTENSION = new MiniDFSClusterExtension(() -> { + Configuration conf = new Configuration(); + conf.set("fs.trash.interval", "10"); + conf.setBoolean("dfs.permissions", true); + File baseDir = new File("./target/hdfs/").getAbsoluteFile(); + FileUtil.fullyDelete(baseDir); + conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath()); + return conf; + }); + @Mock + private OutputCollector collector; + @Mock + private TopologyContext topologyContext; + private DistributedFileSystem fs; + private String hdfsURI; + + @BeforeAll + public static void setupClass() { + Schema.Parser parser = new Schema.Parser(); + schema1 = parser.parse(schemaV1); + + parser = new Schema.Parser(); + schema2 = parser.parse(schemaV2); + + GenericRecordBuilder builder1 = 
new GenericRecordBuilder(schema1); + builder1.set("foo1", "bar1"); + builder1.set("int1", 1); + tuple1 = generateTestTuple(builder1.build()); + + GenericRecordBuilder builder2 = new GenericRecordBuilder(schema2); + builder2.set("foo1", "bar2"); + builder2.set("int1", 2); + tuple2 = generateTestTuple(builder2.build()); + } + + private static Tuple generateTestTuple(GenericRecord record) { + TopologyBuilder builder = new TopologyBuilder(); + GeneralTopologyContext topologyContext = + new GeneralTopologyContext(builder.createTopology(), new Config(), new HashMap(), new HashMap<>(), + new HashMap<>(), "") { + @Override + public Fields getComponentOutputFields(String componentId, String streamId) { + return new Fields("record"); + } + }; + return new TupleImpl(topologyContext, new Values(record), topologyContext.getComponentId(1), 1, ""); + } + + @BeforeEach + public void setup() throws Exception { + fs = DFS_CLUSTER_EXTENSION.getDfscluster().getFileSystem(); + hdfsURI = fs.getUri() + "/"; + } + + @AfterEach + public void shutDown() throws IOException { + fs.close(); + } + + @Test + public void multipleTuplesOneFile() throws IOException { + AvroGenericRecordBolt bolt = makeAvroBolt(hdfsURI, 1, 1f, schemaV1); + + bolt.prepare(new Config(), topologyContext, collector); + bolt.execute(tuple1); + bolt.execute(tuple1); + bolt.execute(tuple1); + bolt.execute(tuple1); + + assertEquals(1, countNonZeroLengthFiles(testRoot)); + verifyAllAvroFiles(testRoot); + } + + @Test + public void multipleTuplesMultiplesFiles() throws IOException { + AvroGenericRecordBolt bolt = makeAvroBolt(hdfsURI, 1, .0001f, schemaV1); + + bolt.prepare(new Config(), topologyContext, collector); + bolt.execute(tuple1); + bolt.execute(tuple1); + bolt.execute(tuple1); + bolt.execute(tuple1); + + assertEquals(4, countNonZeroLengthFiles(testRoot)); + verifyAllAvroFiles(testRoot); + } + + @Test + public void forwardSchemaChangeWorks() throws IOException { + AvroGenericRecordBolt bolt = makeAvroBolt(hdfsURI, 1, 1000f, schemaV1); + + bolt.prepare(new Config(), topologyContext, collector); + bolt.execute(tuple1); + bolt.execute(tuple2); + + //Schema change should have forced a rotation + assertEquals(2, countNonZeroLengthFiles(testRoot)); + + verifyAllAvroFiles(testRoot); + } + + @Test + public void backwardSchemaChangeWorks() throws IOException { + AvroGenericRecordBolt bolt = makeAvroBolt(hdfsURI, 1, 1000f, schemaV2); + + bolt.prepare(new Config(), topologyContext, collector); + bolt.execute(tuple1); + bolt.execute(tuple2); + + //Schema changes should have forced file rotations + assertEquals(2, countNonZeroLengthFiles(testRoot)); + verifyAllAvroFiles(testRoot); + } + + @Test + public void schemaThrashing() throws IOException { + AvroGenericRecordBolt bolt = makeAvroBolt(hdfsURI, 1, 1000f, schemaV2); + + bolt.prepare(new Config(), topologyContext, collector); + bolt.execute(tuple1); + bolt.execute(tuple2); + bolt.execute(tuple1); + bolt.execute(tuple2); + bolt.execute(tuple1); + bolt.execute(tuple2); + bolt.execute(tuple1); + bolt.execute(tuple2); + + //Two distinct schema should result in only two files + assertEquals(2, countNonZeroLengthFiles(testRoot)); + verifyAllAvroFiles(testRoot); + } + + private AvroGenericRecordBolt makeAvroBolt(String nameNodeAddr, int countSync, float rotationSizeMB, String schemaAsString) { + + SyncPolicy fieldsSyncPolicy = new CountSyncPolicy(countSync); + + FileNameFormat fieldsFileNameFormat = new DefaultFileNameFormat().withPath(testRoot); + + FileRotationPolicy rotationPolicy = + new 
FileSizeRotationPolicy(rotationSizeMB, FileSizeRotationPolicy.Units.MB); + + return new AvroGenericRecordBolt() + .withFsUrl(nameNodeAddr) + .withFileNameFormat(fieldsFileNameFormat) + .withRotationPolicy(rotationPolicy) + .withSyncPolicy(fieldsSyncPolicy); + } + + private void verifyAllAvroFiles(String path) throws IOException { + Path p = new Path(path); + + for (FileStatus file : fs.listStatus(p)) { + if (file.getLen() > 0) { + fileIsGoodAvro(file.getPath()); + } + } + } + + private int countNonZeroLengthFiles(String path) throws IOException { + Path p = new Path(path); + int nonZero = 0; + + for (FileStatus file : fs.listStatus(p)) { + if (file.getLen() > 0) { + nonZero++; + } + } + + return nonZero; + } + + private void fileIsGoodAvro(Path path) throws IOException { + DatumReader datumReader = new GenericDatumReader<>(); + try (FSDataInputStream in = fs.open(path, 0); FileOutputStream out = new FileOutputStream("target/FOO.avro")) { + byte[] buffer = new byte[100]; + int bytesRead; + while ((bytesRead = in.read(buffer)) > 0) { + out.write(buffer, 0, bytesRead); + } + } + + java.io.File file = new File("target/FOO.avro"); + + try (DataFileReader dataFileReader = new DataFileReader<>(file, datumReader)) { + GenericRecord user = null; + while (dataFileReader.hasNext()) { + user = dataFileReader.next(user); + } + } + + file.delete(); + } +} diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestHdfsBolt.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestHdfsBolt.java new file mode 100644 index 00000000000..2a6faa0f4be --- /dev/null +++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestHdfsBolt.java @@ -0,0 +1,280 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +package org.apache.storm.hdfs.bolt; + +import java.io.File; +import java.io.IOException; +import java.util.HashMap; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.storm.Config; +import org.apache.storm.hdfs.bolt.format.DefaultFileNameFormat; +import org.apache.storm.hdfs.bolt.format.DelimitedRecordFormat; +import org.apache.storm.hdfs.bolt.format.FileNameFormat; +import org.apache.storm.hdfs.bolt.format.RecordFormat; +import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; +import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; +import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; +import org.apache.storm.hdfs.bolt.sync.SyncPolicy; +import org.apache.storm.hdfs.common.Partitioner; +import org.apache.storm.hdfs.testing.MiniDFSClusterExtension; +import org.apache.storm.task.GeneralTopologyContext; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.TupleImpl; +import org.apache.storm.tuple.Values; +import org.apache.storm.utils.MockTupleHelpers; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; + + +@ExtendWith(MockitoExtension.class) +public class TestHdfsBolt { + @RegisterExtension + public static final MiniDFSClusterExtension DFS_CLUSTER_EXTENSION = new MiniDFSClusterExtension(() -> { + Configuration conf = new Configuration(); + conf.set("fs.trash.interval", "10"); + conf.setBoolean("dfs.permissions", true); + File baseDir = new File("./target/hdfs/").getAbsoluteFile(); + FileUtil.fullyDelete(baseDir); + conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath()); + return conf; + }); + private static final String testRoot = "/unittest"; + Tuple tuple1 = generateTestTuple(1, "First Tuple", "SFO", "CA"); + Tuple tuple2 = generateTestTuple(1, "Second Tuple", "SJO", "CA"); + private String hdfsURI; + private DistributedFileSystem fs; + @Mock + private OutputCollector collector; + @Mock + private TopologyContext topologyContext; + + @BeforeEach + public void setup() throws Exception { + fs = DFS_CLUSTER_EXTENSION.getDfscluster().getFileSystem(); + hdfsURI = "hdfs://localhost:" + DFS_CLUSTER_EXTENSION.getDfscluster().getNameNodePort() + "/"; + } + + @AfterEach + public void shutDown() throws IOException { + fs.close(); + } + + @Test + public void testTwoTuplesTwoFiles() throws IOException { + HdfsBolt bolt = makeHdfsBolt(hdfsURI, 1, .00001f); + + bolt.prepare(new Config(), topologyContext, collector); + bolt.execute(tuple1); + bolt.execute(tuple2); + + verify(collector).ack(tuple1); + verify(collector).ack(tuple2); + + assertEquals(2, 
countNonZeroLengthFiles(testRoot)); + } + + @Test + public void testPartitionedOutput() throws IOException { + HdfsBolt bolt = makeHdfsBolt(hdfsURI, 1, 1000f); + + Partitioner partitioner = new Partitioner() { + @Override + public String getPartitionPath(Tuple tuple) { + return Path.SEPARATOR + tuple.getStringByField("city"); + } + }; + + bolt.prepare(new Config(), topologyContext, collector); + bolt.withPartitioner(partitioner); + + bolt.execute(tuple1); + bolt.execute(tuple2); + + verify(collector).ack(tuple1); + verify(collector).ack(tuple2); + + assertEquals(1, countNonZeroLengthFiles(testRoot + "/SFO")); + assertEquals(1, countNonZeroLengthFiles(testRoot + "/SJO")); + } + + @Test + public void testTwoTuplesOneFile() throws IOException { + HdfsBolt bolt = makeHdfsBolt(hdfsURI, 2, 10000f); + bolt.prepare(new Config(), topologyContext, collector); + bolt.execute(tuple1); + + verifyNoInteractions(collector); + + bolt.execute(tuple2); + verify(collector).ack(tuple1); + verify(collector).ack(tuple2); + + assertEquals(1, countNonZeroLengthFiles(testRoot)); + } + + @Test + public void testFailedSync() throws IOException { + HdfsBolt bolt = makeHdfsBolt(hdfsURI, 2, 10000f); + bolt.prepare(new Config(), topologyContext, collector); + bolt.execute(tuple1); + + fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); + + // All writes/syncs will fail so this should cause a RuntimeException + assertThrows(RuntimeException.class, () -> bolt.execute(tuple1)); + } + + // One tuple and one rotation should yield one file with data + // The failed executions should not cause rotations or any new files + @Test + public void testFailureFilecount() throws IOException, InterruptedException { + HdfsBolt bolt = makeHdfsBolt(hdfsURI, 1, .000001f); + bolt.prepare(new Config(), topologyContext, collector); + + bolt.execute(tuple1); + fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); + try { + bolt.execute(tuple2); + } catch (RuntimeException e) { + // expected - writes fail while the name node is in safe mode + } + try { + bolt.execute(tuple2); + } catch (RuntimeException e) { + // expected - writes fail while the name node is in safe mode + } + try { + bolt.execute(tuple2); + } catch (RuntimeException e) { + // expected - writes fail while the name node is in safe mode + } + + assertEquals(1, countNonZeroLengthFiles(testRoot)); + assertEquals(0, countZeroLengthFiles(testRoot)); + } + + @Test + public void testTickTuples() throws IOException { + HdfsBolt bolt = makeHdfsBolt(hdfsURI, 10, 10000f); + bolt.prepare(new Config(), topologyContext, collector); + + bolt.execute(tuple1); + + //Should not have flushed to file system yet + assertEquals(0, countNonZeroLengthFiles(testRoot)); + + bolt.execute(MockTupleHelpers.mockTickTuple()); + + //Tick should have flushed it + assertEquals(1, countNonZeroLengthFiles(testRoot)); + } + + @Test + public void testCleanupDoesNotThrowExceptionWhenRotationPolicyIsNotTimed() { + //STORM-3372: Rotation policy other than TimedRotationPolicy causes NPE on cleanup + FileRotationPolicy fieldsRotationPolicy = + new FileSizeRotationPolicy(10_000, FileSizeRotationPolicy.Units.MB); + HdfsBolt bolt = makeHdfsBolt(hdfsURI, 10, 10000f) + .withRotationPolicy(fieldsRotationPolicy); + bolt.prepare(new Config(), topologyContext, collector); + bolt.cleanup(); + } + + public void createBaseDirectory(FileSystem passedFs, String path) throws IOException { + Path p = new Path(path); + passedFs.mkdirs(p); + } + + private HdfsBolt makeHdfsBolt(String nameNodeAddr, int countSync, float rotationSizeMB) { + + RecordFormat fieldsFormat = new DelimitedRecordFormat().withFieldDelimiter("|"); + + SyncPolicy fieldsSyncPolicy = new CountSyncPolicy(countSync); + + FileRotationPolicy
fieldsRotationPolicy = + new FileSizeRotationPolicy(rotationSizeMB, FileSizeRotationPolicy.Units.MB); + + FileNameFormat fieldsFileNameFormat = new DefaultFileNameFormat().withPath(testRoot); + + return new HdfsBolt() + .withFsUrl(nameNodeAddr) + .withFileNameFormat(fieldsFileNameFormat) + .withRecordFormat(fieldsFormat) + .withRotationPolicy(fieldsRotationPolicy) + .withSyncPolicy(fieldsSyncPolicy); + } + + private Tuple generateTestTuple(Object id, Object msg, Object city, Object state) { + TopologyBuilder builder = new TopologyBuilder(); + GeneralTopologyContext topologyContext = new GeneralTopologyContext(builder.createTopology(), + new Config(), new HashMap<>(), new HashMap<>(), new HashMap<>(), + "") { + @Override + public Fields getComponentOutputFields(String componentId, String streamId) { + return new Fields("id", "msg", "city", "state"); + } + }; + return new TupleImpl(topologyContext, new Values(id, msg, city, state), topologyContext.getComponentId(1), 1, ""); + } + + // Generally used to compare how files were actually written and compare to expectations based on total + // amount of data written and rotation policies + private int countNonZeroLengthFiles(String path) throws IOException { + Path p = new Path(path); + int nonZero = 0; + + for (FileStatus file : fs.listStatus(p)) { + if (file.getLen() > 0) { + nonZero++; + } + } + + return nonZero; + } + + private int countZeroLengthFiles(String path) throws IOException { + Path p = new Path(path); + int zeroLength = 0; + + for (FileStatus file : fs.listStatus(p)) { + if (file.getLen() == 0) { + zeroLength++; + } + } + + return zeroLength; + } +} diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestSequenceFileBolt.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestSequenceFileBolt.java new file mode 100644 index 00000000000..3fe4be69335 --- /dev/null +++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestSequenceFileBolt.java @@ -0,0 +1,181 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +package org.apache.storm.hdfs.bolt; + +import java.io.File; +import java.io.IOException; +import java.util.HashMap; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.storm.Config; +import org.apache.storm.hdfs.bolt.format.DefaultFileNameFormat; +import org.apache.storm.hdfs.bolt.format.DefaultSequenceFormat; +import org.apache.storm.hdfs.bolt.format.FileNameFormat; +import org.apache.storm.hdfs.bolt.format.SequenceFormat; +import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; +import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; +import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; +import org.apache.storm.hdfs.bolt.sync.SyncPolicy; +import org.apache.storm.hdfs.testing.MiniDFSClusterExtension; +import org.apache.storm.task.GeneralTopologyContext; +import org.apache.storm.task.OutputCollector; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.topology.TopologyBuilder; +import org.apache.storm.tuple.Fields; +import org.apache.storm.tuple.Tuple; +import org.apache.storm.tuple.TupleImpl; +import org.apache.storm.tuple.Values; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; + +@ExtendWith(MockitoExtension.class) +public class TestSequenceFileBolt { + + private static final Logger LOG = LoggerFactory.getLogger(TestSequenceFileBolt.class); + private static final String testRoot = "/unittest"; + @RegisterExtension + public static final MiniDFSClusterExtension DFS_CLUSTER_EXTENSION = new MiniDFSClusterExtension(() -> { + Configuration conf = new Configuration(); + conf.set("fs.trash.interval", "10"); + conf.setBoolean("dfs.permissions", true); + File baseDir = new File("./target/hdfs/").getAbsoluteFile(); + FileUtil.fullyDelete(baseDir); + conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath()); + return conf; + }); + + Tuple tuple1 = generateTestTuple(1l, "first tuple"); + Tuple tuple2 = generateTestTuple(2l, "second tuple"); + private String hdfsURI; + private DistributedFileSystem fs; + @Mock + private OutputCollector collector; + @Mock + private TopologyContext topologyContext; + + @BeforeEach + public void setup() throws Exception { + fs = DFS_CLUSTER_EXTENSION.getDfscluster().getFileSystem(); + hdfsURI = "hdfs://localhost:" + DFS_CLUSTER_EXTENSION.getDfscluster().getNameNodePort() + "/"; + } + + @AfterEach + public void shutDown() throws IOException { + fs.close(); + } + + @Test + public void testTwoTuplesTwoFiles() throws IOException { + SequenceFileBolt bolt = makeSeqBolt(hdfsURI, 1, .00001f); + + bolt.prepare(new Config(), topologyContext, collector); + bolt.execute(tuple1); + bolt.execute(tuple2); + + verify(collector).ack(tuple1); + verify(collector).ack(tuple2); + + assertEquals(2, 
countNonZeroLengthFiles(testRoot)); + } + + @Test + public void testTwoTuplesOneFile() throws IOException { + SequenceFileBolt bolt = makeSeqBolt(hdfsURI, 2, 10000f); + bolt.prepare(new Config(), topologyContext, collector); + bolt.execute(tuple1); + + verifyNoInteractions(collector); + + bolt.execute(tuple2); + verify(collector).ack(tuple1); + verify(collector).ack(tuple2); + + assertEquals(1, countNonZeroLengthFiles(testRoot)); + } + + @Test + public void testFailedSync() throws IOException { + SequenceFileBolt bolt = makeSeqBolt(hdfsURI, 2, 10000f); + bolt.prepare(new Config(), topologyContext, collector); + bolt.execute(tuple1); + + fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); + // All writes/syncs will fail so this should cause a RuntimeException + assertThrows(RuntimeException.class, () -> bolt.execute(tuple1)); + } + + private SequenceFileBolt makeSeqBolt(String nameNodeAddr, int countSync, float rotationSizeMB) { + + SyncPolicy fieldsSyncPolicy = new CountSyncPolicy(countSync); + + FileRotationPolicy fieldsRotationPolicy = + new FileSizeRotationPolicy(rotationSizeMB, FileSizeRotationPolicy.Units.MB); + + FileNameFormat fieldsFileNameFormat = new DefaultFileNameFormat().withPath(testRoot); + + SequenceFormat seqFormat = new DefaultSequenceFormat("key", "value"); + + return new SequenceFileBolt() + .withFsUrl(nameNodeAddr) + .withFileNameFormat(fieldsFileNameFormat) + .withRotationPolicy(fieldsRotationPolicy) + .withSequenceFormat(seqFormat) + .withSyncPolicy(fieldsSyncPolicy); + } + + private Tuple generateTestTuple(Long key, String value) { + TopologyBuilder builder = new TopologyBuilder(); + GeneralTopologyContext topologyContext = new GeneralTopologyContext(builder.createTopology(), + new Config(), new HashMap<>(), new HashMap<>(), new HashMap<>(), + "") { + @Override + public Fields getComponentOutputFields(String componentId, String streamId) { + return new Fields("key", "value"); + } + }; + return new TupleImpl(topologyContext, new Values(key, value), topologyContext.getComponentId(1), 1, ""); + } + + // Generally used to compare how files were actually written and compare to expectations based on total + // amount of data written and rotation policies + private int countNonZeroLengthFiles(String path) throws IOException { + Path p = new Path(path); + int nonZero = 0; + + for (FileStatus file : fs.listStatus(p)) { + if (file.getLen() > 0) { + nonZero++; + } + } + + return nonZero; + } + +} diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestWritersMap.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestWritersMap.java new file mode 100644 index 00000000000..1fabcee4e73 --- /dev/null +++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestWritersMap.java @@ -0,0 +1,79 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.bolt; + +import org.apache.hadoop.fs.Path; +import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; +import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; +import org.apache.storm.hdfs.common.AbstractHDFSWriter; +import org.apache.storm.tuple.Tuple; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class TestWritersMap { + + AbstractHdfsBolt.WritersMap map = new AbstractHdfsBolt.WritersMap(2, null); + AbstractHDFSWriterMock foo = new AbstractHDFSWriterMock(new FileSizeRotationPolicy(1, FileSizeRotationPolicy.Units.KB), null); + AbstractHDFSWriterMock bar = new AbstractHDFSWriterMock(new FileSizeRotationPolicy(1, FileSizeRotationPolicy.Units.KB), null); + AbstractHDFSWriterMock baz = new AbstractHDFSWriterMock(new FileSizeRotationPolicy(1, FileSizeRotationPolicy.Units.KB), null); + + @Test + public void testLRUBehavior() { + map.put("FOO", foo); + map.put("BAR", bar); + + //Access foo to make it most recently used + map.get("FOO"); + + //Add an element and bar should drop out + map.put("BAZ", baz); + + assertTrue(map.keySet().contains("FOO")); + assertTrue(map.keySet().contains("BAZ")); + + assertFalse(map.keySet().contains("BAR")); + + // The removed writer should have been closed + assertTrue(bar.isClosed); + + assertFalse(foo.isClosed); + assertFalse(baz.isClosed); + } + + public static final class AbstractHDFSWriterMock extends AbstractHDFSWriter { + Boolean isClosed; + + public AbstractHDFSWriterMock(FileRotationPolicy policy, Path path) { + super(policy, path); + isClosed = false; + } + + @Override + protected void doWrite(Tuple tuple) { + + } + + @Override + protected void doSync() { + + } + + @Override + protected void doClose() { + isClosed = true; + } + } + +} diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/format/TestSimpleFileNameFormat.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/format/TestSimpleFileNameFormat.java new file mode 100644 index 00000000000..4ac1257dca7 --- /dev/null +++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/format/TestSimpleFileNameFormat.java @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +package org.apache.storm.hdfs.bolt.format; + +import java.net.UnknownHostException; +import java.text.SimpleDateFormat; +import java.util.HashMap; +import java.util.Map; +import org.apache.storm.task.TopologyContext; +import org.apache.storm.utils.Utils; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class TestSimpleFileNameFormat { + + @Test + public void testDefaults() { + Map<String, Object> topoConf = new HashMap<>(); + SimpleFileNameFormat format = new SimpleFileNameFormat(); + format.prepare(null, createTopologyContext(topoConf)); + long now = System.currentTimeMillis(); + String path = format.getPath(); + String name = format.getName(1, now); + + assertEquals("/storm", path); + String time = new SimpleDateFormat("yyyyMMddHHmmss").format(now); + assertEquals(time + ".1.txt", name); + } + + @Test + public void testParameters() { + SimpleFileNameFormat format = new SimpleFileNameFormat() + .withName("$TIME.$HOST.$COMPONENT.$TASK.$NUM.txt") + .withPath("/mypath") + .withTimeFormat("yyyy-MM-dd HH:mm:ss"); + Map<String, Object> topoConf = new HashMap<>(); + format.prepare(null, createTopologyContext(topoConf)); + long now = System.currentTimeMillis(); + String path = format.getPath(); + String name = format.getName(1, now); + + assertEquals("/mypath", path); + String time = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(now); + String host = null; + try { + host = Utils.localHostname(); + } catch (UnknownHostException e) { + e.printStackTrace(); + } + assertEquals(time + "." + host + ".Xcom.7.1.txt", name); + } + + @Test + public void testTimeFormat() { + Map<String, Object> topoConf = new HashMap<>(); + SimpleFileNameFormat format = new SimpleFileNameFormat() + .withTimeFormat("xyz"); + assertThrows(IllegalArgumentException.class, () -> format.prepare(null, createTopologyContext(topoConf))); + } + + private TopologyContext createTopologyContext(Map<String, Object> topoConf) { + Map<Integer, String> taskToComponent = new HashMap<>(); + taskToComponent.put(7, "Xcom"); + return new TopologyContext(null, topoConf, taskToComponent, null, null, null, null, null, null, 7, 6703, null, null, null, null, + null, null, null); + } +} diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/ConfigsTest.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/ConfigsTest.java new file mode 100644 index 00000000000..56cc1694d3a --- /dev/null +++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/ConfigsTest.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.spout; + +import java.util.HashMap; +import java.util.Map; +import org.apache.storm.validation.ConfigValidation; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.fail; + +public class ConfigsTest { + + public static void verifyBad(String key, Object value) { + Map<String, Object> conf = new HashMap<>(); + conf.put(key, value); + try { + ConfigValidation.validateFields(conf); + fail("Expected " + key + " = " + value + " to throw Exception, but it didn't"); + } catch (IllegalArgumentException e) { + //good + } + } + + @SuppressWarnings("deprecation") + @Test + public void testGood() { + Map<String, Object> conf = new HashMap<>(); + conf.put(Configs.READER_TYPE, Configs.TEXT); + ConfigValidation.validateFields(conf); + conf.put(Configs.READER_TYPE, Configs.SEQ); + ConfigValidation.validateFields(conf); + conf.put(Configs.READER_TYPE, TextFileReader.class.getName()); + ConfigValidation.validateFields(conf); + conf.put(Configs.HDFS_URI, "hdfs://namenode/"); + ConfigValidation.validateFields(conf); + conf.put(Configs.SOURCE_DIR, "/input/source"); + ConfigValidation.validateFields(conf); + conf.put(Configs.ARCHIVE_DIR, "/input/done"); + ConfigValidation.validateFields(conf); + conf.put(Configs.BAD_DIR, "/input/bad"); + ConfigValidation.validateFields(conf); + conf.put(Configs.LOCK_DIR, "/topology/lock"); + ConfigValidation.validateFields(conf); + conf.put(Configs.COMMIT_FREQ_COUNT, 0); + ConfigValidation.validateFields(conf); + conf.put(Configs.COMMIT_FREQ_COUNT, 100); + ConfigValidation.validateFields(conf); + conf.put(Configs.COMMIT_FREQ_SEC, 100); + ConfigValidation.validateFields(conf); + conf.put(Configs.MAX_OUTSTANDING, 500); + ConfigValidation.validateFields(conf); + conf.put(Configs.LOCK_TIMEOUT, 100); + ConfigValidation.validateFields(conf); + conf.put(Configs.CLOCKS_INSYNC, true); + ConfigValidation.validateFields(conf); + conf.put(Configs.IGNORE_SUFFIX, ".writing"); + ConfigValidation.validateFields(conf); + Map<String, Object> hdfsConf = new HashMap<>(); + hdfsConf.put("A", "B"); + conf.put(Configs.DEFAULT_HDFS_CONFIG_KEY, hdfsConf); + ConfigValidation.validateFields(conf); + } + + @SuppressWarnings("deprecation") + @Test + public void testBad() { + verifyBad(Configs.READER_TYPE, "SomeString"); + verifyBad(Configs.HDFS_URI, 100); + verifyBad(Configs.COMMIT_FREQ_COUNT, -10); + } +} diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/TestDirLock.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/TestDirLock.java new file mode 100644 index 00000000000..629249c4d84 --- /dev/null +++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/TestDirLock.java @@ -0,0 +1,175 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.spout; + +import java.io.IOException; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.storm.hdfs.testing.MiniDFSClusterExtension; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class TestDirLock { + + private static final int LOCK_EXPIRY_SEC = 1; + private final Path locksDir = new Path("/tmp/lockdir"); + @RegisterExtension + public static final MiniDFSClusterExtension DFS_CLUSTER_EXTENSION = new MiniDFSClusterExtension(); + private FileSystem fs; + private HdfsConfiguration conf = new HdfsConfiguration(); + + @BeforeEach + public void setUp() throws IOException { + conf.set(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, "5000"); + fs = DFS_CLUSTER_EXTENSION.getDfscluster().getFileSystem(); + assert fs.mkdirs(locksDir); + } + + @AfterEach + public void teardownClass() throws IOException { + fs.delete(locksDir, true); + fs.close(); + } + + @Test + public void testBasicLocking() throws Exception { + // 1 grab lock + DirLock lock = DirLock.tryLock(fs, locksDir); + assertTrue(fs.exists(lock.getLockFile())); + + // 2 try to grab another lock while dir is locked + DirLock lock2 = DirLock.tryLock(fs, locksDir); // should fail + assertNull(lock2); + + // 3 let go first lock + lock.release(); + assertFalse(fs.exists(lock.getLockFile())); + + // 4 try locking again + lock2 = DirLock.tryLock(fs, locksDir); + assertTrue(fs.exists(lock2.getLockFile())); + lock2.release(); + assertFalse(fs.exists(lock.getLockFile())); + lock2.release(); // should be thrown + } + + @Test + public void testConcurrentLocking() throws Exception { + DirLockingThread[] threads = null; + try { + threads = startThreads(100, locksDir); + for (DirLockingThread thd : threads) { + thd.join(30_000); + assertTrue(thd.cleanExit, thd.getName() + " did not exit cleanly"); + } + + Path lockFile = new Path(locksDir + Path.SEPARATOR + DirLock.DIR_LOCK_FILE); + assertFalse(fs.exists(lockFile)); + } finally { + if (threads != null) { + for (DirLockingThread thread : threads) { + thread.interrupt(); + thread.join(30_000); + if (thread.isAlive()) { + throw new RuntimeException("Failed to stop threads within 30 seconds, threads may leak into other tests"); + } + } + } + } + } + + private DirLockingThread[] startThreads(int thdCount, Path dir) + throws IOException { + DirLockingThread[] result = new DirLockingThread[thdCount]; + for (int i = 0; i < thdCount; i++) { + result[i] = new DirLockingThread(i, fs, dir); + } + + for (DirLockingThread thd : result) { + thd.start(); + } + return result; + } + + @Test + public void testLockRecovery() throws Exception { + DirLock lock1 = DirLock.tryLock(fs, locksDir); // should pass + assertNotNull(lock1); + + DirLock lock2 = 
DirLock.takeOwnershipIfStale(fs, locksDir, LOCK_EXPIRY_SEC); // should fail + assertNull(lock2); + + Thread.sleep(LOCK_EXPIRY_SEC * 1000 + 500); // wait for lock to expire + assertTrue(fs.exists(lock1.getLockFile())); + + DirLock lock3 = DirLock.takeOwnershipIfStale(fs, locksDir, LOCK_EXPIRY_SEC); // should pass now + assertNotNull(lock3); + assertTrue(fs.exists(lock3.getLockFile())); + lock3.release(); + assertFalse(fs.exists(lock3.getLockFile())); + lock1.release(); // should not throw + } + + static class DirLockingThread extends Thread { + + private final FileSystem fs; + private final Path dir; + public boolean cleanExit = false; + private int thdNum; + + public DirLockingThread(int thdNum, FileSystem fs, Path dir) { + this.thdNum = thdNum; + this.fs = fs; + this.dir = dir; + } + + @Override + public void run() { + Thread.currentThread().setName("DirLockingThread-" + thdNum); + DirLock lock = null; + try { + do { + System.err.println("Trying lock " + getName()); + lock = DirLock.tryLock(fs, dir); + System.err.println("Acquired lock " + getName()); + if (lock == null) { + System.out.println("Retrying lock - " + getName()); + } + } while (lock == null && !Thread.currentThread().isInterrupted()); + cleanExit = true; + } catch (IOException e) { + e.printStackTrace(); + } finally { + try { + if (lock != null) { + lock.release(); + System.err.println("Released lock " + getName()); + } + } catch (IOException e) { + e.printStackTrace(System.err); + } + } + System.err.println("Thread exiting " + getName()); + } // run() + + } // class DirLockingThread +} diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/TestFileLock.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/TestFileLock.java new file mode 100644 index 00000000000..54e7ab6f1e3 --- /dev/null +++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/TestFileLock.java @@ -0,0 +1,380 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.spout; + +import java.io.BufferedReader; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStreamReader; +import java.lang.reflect.Method; +import java.util.ArrayList; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.storm.hdfs.common.HdfsUtils; +import org.apache.storm.hdfs.testing.MiniDFSClusterExtension; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class TestFileLock { + + private final Path filesDir = new Path("/tmp/filesdir"); + private final Path locksDir = new Path("/tmp/locksdir"); + @RegisterExtension + public static final MiniDFSClusterExtension DFS_CLUSTER_EXTENSION = new MiniDFSClusterExtension(); + private FileSystem fs; + private HdfsConfiguration conf = new HdfsConfiguration(); + + public static void closeUnderlyingLockFile(FileLock lock) throws ReflectiveOperationException { + Method m = FileLock.class.getDeclaredMethod("forceCloseLockFile"); + m.setAccessible(true); + m.invoke(lock); + } + + @BeforeEach + public void setup() throws IOException { + conf.set(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, "5000"); + fs = DFS_CLUSTER_EXTENSION.getDfscluster().getFileSystem(); + assert fs.mkdirs(filesDir); + assert fs.mkdirs(locksDir); + } + + @AfterEach + public void teardown() throws IOException { + fs.delete(filesDir, true); + fs.delete(locksDir, true); + fs.close(); + } + + @Test + public void testBasicLocking() throws Exception { + // create empty files in filesDir + Path file1 = new Path(filesDir + Path.SEPARATOR + "file1"); + Path file2 = new Path(filesDir + Path.SEPARATOR + "file2"); + fs.create(file1).close(); + fs.create(file2).close(); // create empty file + + // acquire lock on file1 and verify if worked + FileLock lock1a = FileLock.tryLock(fs, file1, locksDir, "spout1"); + assertNotNull(lock1a); + assertTrue(fs.exists(lock1a.getLockFile())); + assertEquals(lock1a.getLockFile().getParent(), locksDir); // verify lock file location + assertEquals(lock1a.getLockFile().getName(), file1.getName()); // verify lock filename + + // acquire another lock on file1 and verify it failed + FileLock lock1b = FileLock.tryLock(fs, file1, locksDir, "spout1"); + assertNull(lock1b); + + // release lock on file1 and check + lock1a.release(); + assertFalse(fs.exists(lock1a.getLockFile())); + + // Retry locking and verify + FileLock lock1c = FileLock.tryLock(fs, file1, locksDir, "spout1"); + assertNotNull(lock1c); + assertTrue(fs.exists(lock1c.getLockFile())); + assertEquals(lock1c.getLockFile().getParent(), locksDir); // verify lock file location + 
assertEquals(lock1c.getLockFile().getName(), file1.getName()); // verify lock filename + + // try locking another file2 at the same time + FileLock lock2a = FileLock.tryLock(fs, file2, locksDir, "spout1"); + assertNotNull(lock2a); + assertTrue(fs.exists(lock2a.getLockFile())); + assertEquals(lock2a.getLockFile().getParent(), locksDir); // verify lock file location + assertEquals(lock2a.getLockFile().getName(), file2.getName()); // verify lock filename + + // release both locks + lock2a.release(); + assertFalse(fs.exists(lock2a.getLockFile())); + lock1c.release(); + assertFalse(fs.exists(lock1c.getLockFile())); + } + + @Test + public void testHeartbeat() throws Exception { + Path file1 = new Path(filesDir + Path.SEPARATOR + "file1"); + fs.create(file1).close(); + + // acquire lock on file1 + FileLock lock1 = FileLock.tryLock(fs, file1, locksDir, "spout1"); + assertNotNull(lock1); + assertTrue(fs.exists(lock1.getLockFile())); + + ArrayList<String> lines = readTextFile(lock1.getLockFile()); + assertEquals(1, lines.size(), "heartbeats appear to be missing"); + + // heartbeat upon it + lock1.heartbeat("1"); + lock1.heartbeat("2"); + lock1.heartbeat("3"); + + lines = readTextFile(lock1.getLockFile()); + assertEquals(4, lines.size(), "heartbeats appear to be missing"); + + lock1.heartbeat("4"); + lock1.heartbeat("5"); + lock1.heartbeat("6"); + + lines = readTextFile(lock1.getLockFile()); + assertEquals(7, lines.size(), "heartbeats appear to be missing"); + + lock1.release(); + lines = readTextFile(lock1.getLockFile()); + assertNull(lines); + assertFalse(fs.exists(lock1.getLockFile())); + } + + @Test + public void testConcurrentLocking() throws IOException, InterruptedException { + Path file1 = new Path(filesDir + Path.SEPARATOR + "file1"); + fs.create(file1).close(); + + FileLockingThread[] threads = null; + try { + threads = startThreads(100, file1, locksDir); + for (FileLockingThread thd : threads) { + thd.join(30_000); + assertTrue(thd.cleanExit, thd.getName() + " did not exit cleanly"); + } + + Path lockFile = new Path(locksDir + Path.SEPARATOR + file1.getName()); + assertFalse(fs.exists(lockFile)); + } finally { + if (threads != null) { + for (FileLockingThread thread : threads) { + thread.interrupt(); + thread.join(30_000); + if (thread.isAlive()) { + throw new RuntimeException("Failed to stop threads within 30 seconds, threads may leak into other tests"); + } + } + } + } + } + + private FileLockingThread[] startThreads(int thdCount, Path fileToLock, Path locksDir) + throws IOException { + FileLockingThread[] result = new FileLockingThread[thdCount]; + for (int i = 0; i < thdCount; i++) { + result[i] = new FileLockingThread(i, fs, fileToLock, locksDir, "spout" + i); + } + + for (FileLockingThread thd : result) { + thd.start(); + } + return result; + } + + @Test + public void testStaleLockDetection_SingleLock() throws Exception { + final int LOCK_EXPIRY_SEC = 1; + final int WAIT_MSEC = 1500; + Path file1 = new Path(filesDir + Path.SEPARATOR + "file1"); + fs.create(file1).close(); + FileLock lock1 = FileLock.tryLock(fs, file1, locksDir, "spout1"); + try { + // acquire lock on file1 + assertNotNull(lock1); + assertTrue(fs.exists(lock1.getLockFile())); + Thread.sleep(WAIT_MSEC); // wait for lock to expire + HdfsUtils.Pair<Path, FileLock.LogEntry> expired = FileLock.locateOldestExpiredLock(fs, locksDir, LOCK_EXPIRY_SEC); + assertNotNull(expired); + + // heartbeat, ensure it's no longer stale and read back the heartbeat data + lock1.heartbeat("1"); + expired = FileLock.locateOldestExpiredLock(fs, locksDir, 1); + assertNull(expired); + + FileLock.LogEntry lastEntry = lock1.getLastLogEntry(); + assertNotNull(lastEntry); + assertEquals("1", lastEntry.fileOffset); + + // wait and check for expiry again + Thread.sleep(WAIT_MSEC); + expired = FileLock.locateOldestExpiredLock(fs, locksDir, LOCK_EXPIRY_SEC); + assertNotNull(expired); + } finally { + lock1.release(); + fs.delete(file1, false); + } + } + + @Test + public void testStaleLockDetection_MultipleLocks() throws Exception { + final int LOCK_EXPIRY_SEC = 1; + final int WAIT_MSEC = 1500; + Path file1 = new Path(filesDir + Path.SEPARATOR + "file1"); + Path file2 = new Path(filesDir + Path.SEPARATOR + "file2"); + Path file3 = new Path(filesDir + Path.SEPARATOR + "file3"); + + fs.create(file1).close(); + fs.create(file2).close(); + fs.create(file3).close(); + + // 1) acquire locks on file1,file2,file3 + FileLock lock1 = FileLock.tryLock(fs, file1, locksDir, "spout1"); + FileLock lock2 = FileLock.tryLock(fs, file2, locksDir, "spout2"); + FileLock lock3 = FileLock.tryLock(fs, file3, locksDir, "spout3"); + assertNotNull(lock1); + assertNotNull(lock2); + assertNotNull(lock3); + + try { + HdfsUtils.Pair<Path, FileLock.LogEntry> expired = FileLock.locateOldestExpiredLock(fs, locksDir, LOCK_EXPIRY_SEC); + assertNull(expired); + + // 2) wait for all 3 locks to expire then heart beat on 2 locks and verify stale lock + Thread.sleep(WAIT_MSEC); + lock1.heartbeat("1"); + lock2.heartbeat("1"); + + expired = FileLock.locateOldestExpiredLock(fs, locksDir, LOCK_EXPIRY_SEC); + assertNotNull(expired); + assertEquals("spout3", expired.getValue().componentId); + } finally { + lock1.release(); + lock2.release(); + lock3.release(); + fs.delete(file1, false); + fs.delete(file2, false); + fs.delete(file3, false); + } + } + + @Test + public void testLockRecovery() throws Exception { + final int LOCK_EXPIRY_SEC = 1; + final int WAIT_MSEC = LOCK_EXPIRY_SEC * 1000 + 500; + Path file1 = new Path(filesDir + Path.SEPARATOR + "file1"); + Path file2 = new Path(filesDir + Path.SEPARATOR + "file2"); + Path file3 = new Path(filesDir + Path.SEPARATOR + "file3"); + + fs.create(file1).close(); + fs.create(file2).close(); + fs.create(file3).close(); + + // 1) acquire locks on file1,file2,file3 + FileLock lock1 = FileLock.tryLock(fs, file1, locksDir, "spout1"); + FileLock lock2 = FileLock.tryLock(fs, file2, locksDir, "spout2"); + FileLock lock3 = FileLock.tryLock(fs, file3, locksDir, "spout3"); + assertNotNull(lock1); + assertNotNull(lock2); + assertNotNull(lock3); + + try { + HdfsUtils.Pair<Path, FileLock.LogEntry> expired = FileLock.locateOldestExpiredLock(fs, locksDir, LOCK_EXPIRY_SEC); + assertNull(expired); + + // 1) Simulate lock file lease expiring and getting closed by HDFS + closeUnderlyingLockFile(lock3); + + // 2) wait for all 3 locks to expire then heart beat on 2 locks + Thread.sleep(WAIT_MSEC * 2); // wait for locks to expire + lock1.heartbeat("1"); + lock2.heartbeat("1"); + + // 3) Take ownership of stale lock + FileLock lock3b = FileLock.acquireOldestExpiredLock(fs, locksDir, LOCK_EXPIRY_SEC, "spout1"); + assertNotNull(lock3b); + assertEquals(Path.getPathWithoutSchemeAndAuthority(lock3b.getLockFile()), lock3.getLockFile(), "Expected lock3 file"); + } finally { + lock1.release(); + lock2.release(); + lock3.release(); + fs.delete(file1, false); + fs.delete(file2, false); + try { + fs.delete(file3, false); + } catch (Exception e) { + e.printStackTrace(); + } + } + } + + /** + * return null if file not found + */ + private ArrayList<String> readTextFile(Path file) throws IOException { + try (FSDataInputStream os = fs.open(file)) { + if (os == null) { + return null; + } + BufferedReader reader = new BufferedReader(new InputStreamReader(os)); + ArrayList<String> lines = new ArrayList<>(); + for (String line = reader.readLine(); line != null; line = reader.readLine()) { + lines.add(line); + } + return lines; + } catch (FileNotFoundException e) { + return null; + } + } + + static class FileLockingThread extends Thread { + + private final FileSystem fs; + public boolean cleanExit = false; + private final int thdNum; + private final Path fileToLock; + private final Path locksDir; + private final String spoutId; + + public FileLockingThread(int thdNum, FileSystem fs, Path fileToLock, Path locksDir, String spoutId) { + this.thdNum = thdNum; + this.fs = fs; + this.fileToLock = fileToLock; + this.locksDir = locksDir; + this.spoutId = spoutId; + } + + @Override + public void run() { + Thread.currentThread().setName("FileLockingThread-" + thdNum); + FileLock lock = null; + try { + do { + System.err.println("Trying lock - " + getName()); + lock = FileLock.tryLock(fs, this.fileToLock, this.locksDir, spoutId); + System.err.println("Acquired lock - " + getName()); + if (lock == null) { + System.out.println("Retrying lock - " + getName()); + } + } while (lock == null && !Thread.currentThread().isInterrupted()); + cleanExit = true; + } catch (Exception e) { + e.printStackTrace(); + } finally { + try { + if (lock != null) { + lock.release(); + System.err.println("Released lock - " + getName()); + } + } catch (IOException e) { + e.printStackTrace(System.err); + } + } + System.err.println("Thread exiting - " + getName()); + } // run() + + } // class FileLockingThread +} diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/TestHdfsSemantics.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/TestHdfsSemantics.java new file mode 100644 index 00000000000..d17e8ac96b7 --- /dev/null +++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/TestHdfsSemantics.java @@ -0,0 +1,199 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.spout; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.core.IsNull.notNullValue; + +import java.io.IOException; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.storm.hdfs.testing.MiniDFSClusterExtension; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +public class TestHdfsSemantics { + + private final HdfsConfiguration conf = new HdfsConfiguration(); + private final Path dir = new Path("/tmp/filesdir"); + @RegisterExtension + public static final MiniDFSClusterExtension DFS_CLUSTER_EXTENSION = new MiniDFSClusterExtension(); + private FileSystem fs; + + @BeforeEach + public void setup() throws IOException { + conf.set(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, "5000"); + fs = DFS_CLUSTER_EXTENSION.getDfscluster().getFileSystem(); + assert fs.mkdirs(dir); + } + + @AfterEach + public void teardown() throws IOException { + fs.delete(dir, true); + fs.close(); + } + + @Test + public void testDeleteSemantics() throws Exception { + Path file = new Path(dir.toString() + Path.SEPARATOR_CHAR + "file1"); + // try { + // 1) Delete absent file - should return false + assertFalse(fs.exists(file)); + try { + assertFalse(fs.delete(file, false)); + } catch (IOException e) { + e.printStackTrace(); + } + + // 2) deleting open file - should return true + fs.create(file, false); + assertTrue(fs.delete(file, false)); + + // 3) deleting closed file - should return true + FSDataOutputStream os = fs.create(file, false); + os.close(); + assertTrue(fs.exists(file)); + assertTrue(fs.delete(file, false)); + assertFalse(fs.exists(file)); + } + + @Test + public void testConcurrentDeletion() throws Exception { + Path file = new Path(dir.toString() + Path.SEPARATOR_CHAR + "file1"); + fs.create(file).close(); + // 1 concurrent deletion - only one thread should succeed + FileDeletionThread[] threads = null; + try { + threads = startThreads(10, file); + int successCount = 0; + for (FileDeletionThread thd : threads) { + thd.join(30_000); + if (thd.succeeded) { + successCount++; + } + if (thd.exception != null) { + assertNotNull(thd.exception); + } + } + System.err.println(successCount); + assertEquals(1, successCount); + } finally { + if (threads != null) { + for (FileDeletionThread thread : threads) { + thread.interrupt(); + thread.join(30_000); + if (thread.isAlive()) { + throw new RuntimeException("Failed to stop threads within 30 seconds, 
threads may leak into other tests"); + } + } + } + } + } + + @Test + public void testAppendSemantics() throws Exception { + //1 try to append to an open file + Path file1 = new Path(dir.toString() + Path.SEPARATOR_CHAR + "file1"); + try (FSDataOutputStream os1 = fs.create(file1, false)) { + fs.append(file1); // should fail + fail("Append did not throw an exception"); + } catch (RemoteException e) { + // expecting AlreadyBeingCreatedException inside RemoteException + assertEquals(AlreadyBeingCreatedException.class, e.unwrapRemoteException().getClass()); + } + + //2 try to append to a closed file + try (FSDataOutputStream os2 = fs.append(file1)) { + assertThat(os2, notNullValue()); + } + } + + @Test + public void testDoubleCreateSemantics() throws Exception { + //1 create an already existing open file w/o override flag + Path file1 = new Path(dir.toString() + Path.SEPARATOR_CHAR + "file1"); + try (FSDataOutputStream os1 = fs.create(file1, false)) { + fs.create(file1, false); // should fail + fail("Create did not throw an exception"); + } catch (RemoteException e) { + assertEquals(AlreadyBeingCreatedException.class, e.unwrapRemoteException().getClass()); + } + //2 close file and retry creation + try { + fs.create(file1, false); // should still fail + fail("Create did not throw an exception"); + } catch (FileAlreadyExistsException e) { + // expecting this exception + } + + //3 delete file and retry creation + fs.delete(file1, false); + try (FSDataOutputStream os2 = fs.create(file1, false)) { + assertNotNull(os2); + } + } + + private FileDeletionThread[] startThreads(int thdCount, Path file) + throws IOException { + FileDeletionThread[] result = new FileDeletionThread[thdCount]; + for (int i = 0; i < thdCount; i++) { + result[i] = new FileDeletionThread(i, fs, file); + } + + for (FileDeletionThread thd : result) { + thd.start(); + } + return result; + } + + private static class FileDeletionThread extends Thread { + + private final int thdNum; + private final FileSystem fs; + private final Path file; + public boolean succeeded; + public Exception exception = null; + + public FileDeletionThread(int thdNum, FileSystem fs, Path file) + throws IOException { + this.thdNum = thdNum; + this.fs = fs; + this.file = file; + } + + @Override + public void run() { + Thread.currentThread().setName("FileDeletionThread-" + thdNum); + try { + succeeded = fs.delete(file, false); + } catch (Exception e) { + exception = e; + } + } // run() + + } // class FileLockingThread +} diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/TestHdfsSpout.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/TestHdfsSpout.java new file mode 100644 index 00000000000..f6f72142339 --- /dev/null +++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/TestHdfsSpout.java @@ -0,0 +1,787 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.spout; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.core.Is.is; +import static org.junit.jupiter.api.Assertions.*; + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.InputStreamReader; +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.storm.Config; +import org.apache.storm.hdfs.common.HdfsUtils; +import org.apache.storm.hdfs.common.HdfsUtils.Pair; +import org.apache.storm.hdfs.testing.MiniDFSClusterExtensionClassLevel; +import org.apache.storm.spout.SpoutOutputCollector; +import org.apache.storm.task.TopologyContext; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.junit.jupiter.api.io.TempDir; + +public class TestHdfsSpout { + + private static final Configuration conf = new Configuration(); + @RegisterExtension + public static final MiniDFSClusterExtensionClassLevel DFS_CLUSTER_EXTENSION = new MiniDFSClusterExtensionClassLevel(); + private static DistributedFileSystem fs; + @TempDir + public File tempFolder; + public File baseFolder; + private Path source; + private Path archive; + private Path badfiles; + + @BeforeAll + public static void setupClass() throws IOException { + fs = DFS_CLUSTER_EXTENSION.getDfscluster().getFileSystem(); + } + + @AfterAll + public static void teardownClass() throws IOException { + fs.close(); + } + + private static T getField(HdfsSpout spout, String fieldName) throws NoSuchFieldException, IllegalAccessException { + Field readerFld = HdfsSpout.class.getDeclaredField(fieldName); + readerFld.setAccessible(true); + return (T) readerFld.get(spout); + } + + private static boolean getBoolField(HdfsSpout spout, String fieldName) throws NoSuchFieldException, IllegalAccessException { + Field readerFld = HdfsSpout.class.getDeclaredField(fieldName); + readerFld.setAccessible(true); + return readerFld.getBoolean(spout); + } + + private static List readTextFile(FileSystem fs, String f) throws IOException { + Path file = new Path(f); + FSDataInputStream x = fs.open(file); + BufferedReader reader = new BufferedReader(new InputStreamReader(x)); + String line = null; + ArrayList result = new ArrayList<>(); + while ((line = reader.readLine()) != null) { + result.add(line); + } + return result; + } + + private static void createSeqFile(FileSystem fs, Path file, int rowCount) throws IOException { + + Configuration conf = new Configuration(); + try { + if (fs.exists(file)) { + fs.delete(file, false); + } + + SequenceFile.Writer w = 
SequenceFile.createWriter(fs, conf, file, IntWritable.class, Text.class); + for (int i = 0; i < rowCount; i++) { + w.append(new IntWritable(i), new Text("line " + i)); + } + w.close(); + System.out.println("done"); + } catch (IOException e) { + e.printStackTrace(); + + } + } + + @BeforeEach + public void setup() throws Exception { + baseFolder = new File(tempFolder, "hdfsspout"); + baseFolder.mkdir(); + source = new Path(baseFolder.toString() + "/source"); + fs.mkdirs(source); + archive = new Path(baseFolder.toString() + "/archive"); + fs.mkdirs(archive); + badfiles = new Path(baseFolder.toString() + "/bad"); + fs.mkdirs(badfiles); + } + + @AfterEach + public void shutDown() throws IOException { + fs.delete(new Path(baseFolder.toString()), true); + } + + @Test + public void testSimpleText_noACK() throws Exception { + Path file1 = new Path(source.toString() + "/file1.txt"); + createTextFile(file1, 5); + + Path file2 = new Path(source.toString() + "/file2.txt"); + createTextFile(file2, 5); + + try (AutoCloseableHdfsSpout closeableSpout = makeSpout(Configs.TEXT, TextFileReader.defaultFields)) { + HdfsSpout spout = closeableSpout.spout; + spout.setCommitFrequencyCount(1); + spout.setCommitFrequencySec(1); + + Map conf = getCommonConfigs(); + openSpout(spout, 0, conf); + + runSpout(spout, "r11"); + + Path arc1 = new Path(archive.toString() + "/file1.txt"); + Path arc2 = new Path(archive.toString() + "/file2.txt"); + checkCollectorOutput_txt((MockCollector) spout.getCollector(), arc1, arc2); + } + } + + @Test + public void testSimpleText_ACK() throws Exception { + Path file1 = new Path(source.toString() + "/file1.txt"); + createTextFile(file1, 5); + + Path file2 = new Path(source.toString() + "/file2.txt"); + createTextFile(file2, 5); + + try (AutoCloseableHdfsSpout closeableSpout = makeSpout(Configs.TEXT, TextFileReader.defaultFields)) { + HdfsSpout spout = closeableSpout.spout; + spout.setCommitFrequencyCount(1); + spout.setCommitFrequencySec(1); + + Map conf = getCommonConfigs(); + conf.put(Config.TOPOLOGY_ACKER_EXECUTORS, "1"); // enable ACKing + openSpout(spout, 0, conf); + + // consume file 1 + runSpout(spout, "r6", "a0", "a1", "a2", "a3", "a4"); + Path arc1 = new Path(archive.toString() + "/file1.txt"); + checkCollectorOutput_txt((MockCollector) spout.getCollector(), arc1); + + // consume file 2 + runSpout(spout, "r6", "a5", "a6", "a7", "a8", "a9"); + Path arc2 = new Path(archive.toString() + "/file2.txt"); + checkCollectorOutput_txt((MockCollector) spout.getCollector(), arc1, arc2); + } + } + + @Test + public void testEmptySimpleText_ACK() throws Exception { + Path file1 = new Path(source.toString() + "/file_empty.txt"); + createTextFile(file1, 0); + + //Ensure the second file has a later modified timestamp, as the spout should pick the first file first. + Thread.sleep(2); + + Path file2 = new Path(source.toString() + "/file.txt"); + createTextFile(file2, 5); + + try (AutoCloseableHdfsSpout closeableSpout = makeSpout(Configs.TEXT, TextFileReader.defaultFields)) { + HdfsSpout spout = closeableSpout.spout; + spout.setCommitFrequencyCount(1); + + Map conf = getCommonConfigs(); + conf.put(Config.TOPOLOGY_ACKER_EXECUTORS, "1"); // enable ACKing + openSpout(spout, 0, conf); + + // Read once. 
Since the first file is empty, the spout should continue with file 2 + runSpout(spout, "r6", "a0", "a1", "a2", "a3", "a4"); + //File 1 should be moved to archive + assertThat(fs.isFile(new Path(archive.toString() + "/file_empty.txt")), is(true)); + //File 2 should be read + Path arc2 = new Path(archive.toString() + "/file.txt"); + checkCollectorOutput_txt((MockCollector) spout.getCollector(), arc2); + } + } + + @Test + public void testResumeAbandoned_Text_NoAck() throws Exception { + Path file1 = new Path(source.toString() + "/file1.txt"); + createTextFile(file1, 6); + + final Integer lockExpirySec = 1; + + try (AutoCloseableHdfsSpout closeableSpout = makeSpout(Configs.TEXT, TextFileReader.defaultFields)) { + HdfsSpout spout = closeableSpout.spout; + spout.setCommitFrequencyCount(1); + spout.setCommitFrequencySec(1000); // effectively disable commits based on time + spout.setLockTimeoutSec(lockExpirySec); + + try (AutoCloseableHdfsSpout closeableSpout2 = makeSpout(Configs.TEXT, TextFileReader.defaultFields)) { + HdfsSpout spout2 = closeableSpout2.spout; + spout2.setCommitFrequencyCount(1); + spout2.setCommitFrequencySec(1000); // effectively disable commits based on time + spout2.setLockTimeoutSec(lockExpirySec); + + Map conf = getCommonConfigs(); + openSpout(spout, 0, conf); + openSpout(spout2, 1, conf); + + // consume file 1 partially + List res = runSpout(spout, "r2"); + assertEquals(2, res.size()); + + // abandon file + FileLock lock = getField(spout, "lock"); + TestFileLock.closeUnderlyingLockFile(lock); + Thread.sleep(lockExpirySec * 2 * 1000); + + // check lock file presence + assertTrue(fs.exists(lock.getLockFile())); + + // create another spout to take over processing and read a few lines + List res2 = runSpout(spout2, "r3"); + assertEquals(3, res2.size()); + + // check lock file presence + assertTrue(fs.exists(lock.getLockFile())); + + // check lock file contents + List contents = readTextFile(fs, lock.getLockFile().toString()); + assertFalse(contents.isEmpty()); + + // finish up reading the file + res2 = runSpout(spout2, "r2"); + assertEquals(4, res2.size()); + + // check lock file is gone + assertFalse(fs.exists(lock.getLockFile())); + FileReader rdr = getField(spout2, "reader"); + assertNull(rdr); + assertTrue(getBoolField(spout2, "fileReadCompletely")); + } + } + } + + @Test + public void testResumeAbandoned_Seq_NoAck() throws Exception { + Path file1 = new Path(source.toString() + "/file1.seq"); + createSeqFile(fs, file1, 6); + + final Integer lockExpirySec = 1; + + try (AutoCloseableHdfsSpout closeableSpout = makeSpout(Configs.SEQ, SequenceFileReader.defaultFields)) { + HdfsSpout spout = closeableSpout.spout; + spout.setCommitFrequencyCount(1); + spout.setCommitFrequencySec(1000); // effectively disable commits based on time + spout.setLockTimeoutSec(lockExpirySec); + + try (AutoCloseableHdfsSpout closeableSpout2 = makeSpout(Configs.SEQ, SequenceFileReader.defaultFields)) { + HdfsSpout spout2 = closeableSpout2.spout; + spout2.setCommitFrequencyCount(1); + spout2.setCommitFrequencySec(1000); // effectively disable commits based on time + spout2.setLockTimeoutSec(lockExpirySec); + + Map conf = getCommonConfigs(); + openSpout(spout, 0, conf); + openSpout(spout2, 1, conf); + + // consume file 1 partially + List res = runSpout(spout, "r2"); + assertEquals(2, res.size()); + // abandon file + FileLock lock = getField(spout, "lock"); + TestFileLock.closeUnderlyingLockFile(lock); + Thread.sleep(lockExpirySec * 2 * 1000); + + // check lock file presence + 
assertTrue(fs.exists(lock.getLockFile()));
+
+                // create another spout to take over processing and read a few lines
+                List<String> res2 = runSpout(spout2, "r3");
+                assertEquals(3, res2.size());
+
+                // check lock file presence
+                assertTrue(fs.exists(lock.getLockFile()));
+
+                // check lock file contents
+                List<String> contents = getTextFileContents(fs, lock.getLockFile());
+                assertFalse(contents.isEmpty());
+
+                // finish up reading the file
+                res2 = runSpout(spout2, "r3");
+                assertEquals(4, res2.size());
+
+                // check lock file is gone
+                assertFalse(fs.exists(lock.getLockFile()));
+                FileReader rdr = getField(spout2, "reader");
+                assertNull(rdr);
+                assertTrue(getBoolField(spout2, "fileReadCompletely"));
+            }
+        }
+    }
+
+    private void checkCollectorOutput_txt(MockCollector collector, Path... txtFiles) throws IOException {
+        ArrayList<String> expected = new ArrayList<>();
+        for (Path txtFile : txtFiles) {
+            List<String> lines = getTextFileContents(fs, txtFile);
+            expected.addAll(lines);
+        }
+
+        List<String> actual = new ArrayList<>();
+        for (Pair<Object, List<Object>> item : collector.items) {
+            actual.add(item.getValue().get(0).toString());
+        }
+        assertEquals(expected, actual);
+    }
+
+    private List<String> getTextFileContents(FileSystem fs, Path txtFile) throws IOException {
+        ArrayList<String> result = new ArrayList<>();
+        FSDataInputStream istream = fs.open(txtFile);
+        InputStreamReader isreader = new InputStreamReader(istream, "UTF-8");
+        BufferedReader reader = new BufferedReader(isreader);
+
+        for (String line = reader.readLine(); line != null; line = reader.readLine()) {
+            result.add(line);
+        }
+        isreader.close();
+        return result;
+    }
+
+    private void checkCollectorOutput_seq(MockCollector collector, Path... seqFiles) throws IOException {
+        ArrayList<String> expected = new ArrayList<>();
+        for (Path seqFile : seqFiles) {
+            List<String> lines = getSeqFileContents(fs, seqFile);
+            expected.addAll(lines);
+        }
+        assertTrue(expected.equals(collector.lines));
+    }
+
+    private List<String> getSeqFileContents(FileSystem fs, Path...
seqFiles) throws IOException {
+        ArrayList<String> result = new ArrayList<>();
+
+        for (Path seqFile : seqFiles) {
+            Path file = new Path(fs.getUri().toString() + seqFile.toString());
+            SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file));
+            try {
+                Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
+                Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
+                while (reader.next(key, value)) {
+                    String keyValStr = Arrays.asList(key, value).toString();
+                    result.add(keyValStr);
+                }
+            } finally {
+                reader.close();
+            }
+        } // for
+        return result;
+    }
+
+    private List<String> listDir(Path p) throws IOException {
+        ArrayList<String> result = new ArrayList<>();
+        RemoteIterator<LocatedFileStatus> fileNames = fs.listFiles(p, false);
+        while (fileNames.hasNext()) {
+            LocatedFileStatus fileStatus = fileNames.next();
+            result.add(Path.getPathWithoutSchemeAndAuthority(fileStatus.getPath()).toString());
+        }
+        return result;
+    }
+
+    @Test
+    public void testMultipleFileConsumption_Ack() throws Exception {
+        Path file1 = new Path(source.toString() + "/file1.txt");
+        createTextFile(file1, 5);
+
+        try (AutoCloseableHdfsSpout closeableSpout = makeSpout(Configs.TEXT, TextFileReader.defaultFields)) {
+            HdfsSpout spout = closeableSpout.spout;
+            spout.setCommitFrequencyCount(1);
+            spout.setCommitFrequencySec(1);
+
+            Map<String, Object> conf = getCommonConfigs();
+            conf.put(Config.TOPOLOGY_ACKER_EXECUTORS, "1"); // enable ACKing
+            openSpout(spout, 0, conf);
+
+            // read a few lines from file1, don't ack
+            runSpout(spout, "r3");
+            FileReader reader = getField(spout, "reader");
+            assertNotNull(reader);
+            assertFalse(getBoolField(spout, "fileReadCompletely"));
+
+            // read remaining lines
+            runSpout(spout, "r3");
+            reader = getField(spout, "reader");
+            assertNotNull(reader);
+            assertTrue(getBoolField(spout, "fileReadCompletely"));
+
+            // ack a few
+            runSpout(spout, "a0", "a1", "a2");
+            reader = getField(spout, "reader");
+            assertNotNull(reader);
+            assertTrue(getBoolField(spout, "fileReadCompletely"));
+
+            // ack the rest
+            runSpout(spout, "a3", "a4");
+            reader = getField(spout, "reader");
+            assertNull(reader);
+            assertTrue(getBoolField(spout, "fileReadCompletely"));
+
+            // go to next file
+            Path file2 = new Path(source.toString() + "/file2.txt");
+            createTextFile(file2, 5);
+
+            // Read 1 line
+            runSpout(spout, "r1");
+            assertNotNull(getField(spout, "reader"));
+            assertFalse(getBoolField(spout, "fileReadCompletely"));
+
+            // ack 1 tuple
+            runSpout(spout, "a5");
+            assertNotNull(getField(spout, "reader"));
+            assertFalse(getBoolField(spout, "fileReadCompletely"));
+
+            // read and ack remaining lines
+            runSpout(spout, "r5", "a6", "a7", "a8", "a9");
+            assertNull(getField(spout, "reader"));
+            assertTrue(getBoolField(spout, "fileReadCompletely"));
+        }
+    }
+
+    @Test
+    public void testSimpleSequenceFile() throws Exception {
+        //1) create a couple files to consume
+        source = new Path("/tmp/hdfsspout/source");
+        fs.mkdirs(source);
+        archive = new Path("/tmp/hdfsspout/archive");
+        fs.mkdirs(archive);
+
+        Path file1 = new Path(source + "/file1.seq");
+        createSeqFile(fs, file1, 5);
+
+        Path file2 = new Path(source + "/file2.seq");
+        createSeqFile(fs, file2, 5);
+
+        try (AutoCloseableHdfsSpout closeableSpout = makeSpout(Configs.SEQ, SequenceFileReader.defaultFields)) {
+            HdfsSpout spout = closeableSpout.spout;
+            Map<String, Object> conf = getCommonConfigs();
+            openSpout(spout, 0, conf);
+
+            // consume both files
+            List<String> res = runSpout(spout, "r11");
+            assertEquals(10, res.size());
+
+            assertEquals(2,
listDir(archive).size()); + + Path f1 = new Path(archive + "/file1.seq"); + Path f2 = new Path(archive + "/file2.seq"); + + checkCollectorOutput_seq((MockCollector) spout.getCollector(), f1, f2); + } + } + + @Test + public void testReadFailures() throws Exception { + // 1) create couple of input files to read + Path file1 = new Path(source.toString() + "/file1.txt"); + Path file2 = new Path(source.toString() + "/file2.txt"); + + createTextFile(file1, 6); + createTextFile(file2, 7); + assertEquals(2, listDir(source).size()); + + // 2) run spout + try ( + AutoCloseableHdfsSpout closeableSpout = makeSpout(MockTextFailingReader.class.getName(), MockTextFailingReader.defaultFields)) { + HdfsSpout spout = closeableSpout.spout; + Map conf = getCommonConfigs(); + openSpout(spout, 0, conf); + + List res = runSpout(spout, "r11"); + String[] expected = new String[]{ "[line 0]", "[line 1]", "[line 2]", "[line 0]", "[line 1]", "[line 2]" }; + assertArrayEquals(expected, res.toArray()); + + // 3) make sure 6 lines (3 from each file) were read in all + assertEquals(((MockCollector) spout.getCollector()).lines.size(), 6); + ArrayList badFiles = HdfsUtils.listFilesByModificationTime(fs, badfiles, 0); + assertEquals(badFiles.size(), 2); + } + } + + // check lock creation/deletion and contents + @Test + public void testLocking() throws Exception { + Path file1 = new Path(source.toString() + "/file1.txt"); + createTextFile(file1, 10); + + // 0) config spout to log progress in lock file for each tuple + try (AutoCloseableHdfsSpout closeableSpout = makeSpout(Configs.TEXT, TextFileReader.defaultFields)) { + HdfsSpout spout = closeableSpout.spout; + spout.setCommitFrequencyCount(1); + spout.setCommitFrequencySec(1000); // effectively disable commits based on time + + Map conf = getCommonConfigs(); + openSpout(spout, 0, conf); + + // 1) read initial lines in file, then check if lock exists + List res = runSpout(spout, "r5"); + assertEquals(5, res.size()); + List lockFiles = listDir(spout.getLockDirPath()); + assertEquals(1, lockFiles.size()); + + // 2) check log file content line count == tuples emitted + 1 + List lines = readTextFile(fs, lockFiles.get(0)); + assertEquals(lines.size(), res.size() + 1); + + // 3) read remaining lines in file, then ensure lock is gone + runSpout(spout, "r6"); + lockFiles = listDir(spout.getLockDirPath()); + assertEquals(0, lockFiles.size()); + + // 4) --- Create another input file and reverify same behavior --- + Path file2 = new Path(source.toString() + "/file2.txt"); + createTextFile(file2, 10); + + // 5) read initial lines in file, then check if lock exists + res = runSpout(spout, "r5"); + assertEquals(15, res.size()); + lockFiles = listDir(spout.getLockDirPath()); + assertEquals(1, lockFiles.size()); + + // 6) check log file content line count == tuples emitted + 1 + lines = readTextFile(fs, lockFiles.get(0)); + assertEquals(6, lines.size()); + + // 7) read remaining lines in file, then ensure lock is gone + runSpout(spout, "r6"); + lockFiles = listDir(spout.getLockDirPath()); + assertEquals(0, lockFiles.size()); + } + } + + @Test + public void testLockLoggingFreqCount() throws Exception { + Path file1 = new Path(source.toString() + "/file1.txt"); + createTextFile(file1, 10); + + // 0) config spout to log progress in lock file for each tuple + try (AutoCloseableHdfsSpout closeableSpout = makeSpout(Configs.TEXT, TextFileReader.defaultFields)) { + HdfsSpout spout = closeableSpout.spout; + spout.setCommitFrequencyCount(2); // 1 lock log entry every 2 tuples + 
spout.setCommitFrequencySec(1000); // Effectively disable commits based on time
+
+            Map<String, Object> conf = getCommonConfigs();
+            openSpout(spout, 0, conf);
+
+            // 1) read 5 lines in file
+            runSpout(spout, "r5");
+
+            // 2) check log file contents
+            String lockFile = listDir(spout.getLockDirPath()).get(0);
+            List<String> lines = readTextFile(fs, lockFile);
+            assertEquals(lines.size(), 3);
+
+            // 3) read 6th line and see if another log entry was made
+            runSpout(spout, "r1");
+            lines = readTextFile(fs, lockFile);
+            assertEquals(lines.size(), 4);
+        }
+    }
+
+    @Test
+    public void testLockLoggingFreqSec() throws Exception {
+        Path file1 = new Path(source.toString() + "/file1.txt");
+        createTextFile(file1, 10);
+
+        // 0) config spout to log progress in lock file for each tuple
+        try (AutoCloseableHdfsSpout closeableSpout = makeSpout(Configs.TEXT, TextFileReader.defaultFields)) {
+            HdfsSpout spout = closeableSpout.spout;
+            spout.setCommitFrequencyCount(0); // disable it
+            spout.setCommitFrequencySec(2); // log every 2 sec
+
+            Map<String, Object> conf = getCommonConfigs();
+            openSpout(spout, 0, conf);
+
+            // 1) read 5 lines in file
+            runSpout(spout, "r5");
+
+            // 2) check log file contents
+            String lockFile = listDir(spout.getLockDirPath()).get(0);
+            List<String> lines = readTextFile(fs, lockFile);
+            assertEquals(lines.size(), 1);
+            Thread.sleep(3000); // allow freq_sec to expire
+
+            // 3) read another line and see if another log entry was made
+            runSpout(spout, "r1");
+            lines = readTextFile(fs, lockFile);
+            assertEquals(2, lines.size());
+        }
+    }
+
+    private Map<String, Object> getCommonConfigs() {
+        Map<String, Object> topoConf = new HashMap<>();
+        topoConf.put(Config.TOPOLOGY_ACKER_EXECUTORS, "0");
+        return topoConf;
+    }
+
+    private AutoCloseableHdfsSpout makeSpout(String readerType, String[] outputFields) {
+        HdfsSpout spout = new HdfsSpout().withOutputFields(outputFields)
+                                         .setReaderType(readerType)
+                                         .setHdfsUri(DFS_CLUSTER_EXTENSION.getDfscluster().getURI().toString())
+                                         .setSourceDir(source.toString())
+                                         .setArchiveDir(archive.toString())
+                                         .setBadFilesDir(badfiles.toString());
+
+        return new AutoCloseableHdfsSpout(spout);
+    }
+
+    private void openSpout(HdfsSpout spout, int spoutId, Map<String, Object> topoConf) {
+        MockCollector collector = new MockCollector();
+        spout.open(topoConf, new MockTopologyContext(spoutId, topoConf), collector);
+    }
+
+    /**
+     * Execute a sequence of calls on HdfsSpout.
+     *
+     * @param cmds set of commands to run, e.g. "r,r,r,r,a1,f2,...". The commands are:
+     *             r[N] - call nextTuple() N times,
+     *             aN - ack item number N,
+     *             fN - fail item number N.
+     */
+    private List<String> runSpout(HdfsSpout spout, String... cmds) {
+        MockCollector collector = (MockCollector) spout.getCollector();
+        for (String cmd : cmds) {
+            if (cmd.startsWith("r")) {
+                int count = 1;
+                if (cmd.length() > 1) {
+                    count = Integer.parseInt(cmd.substring(1));
+                }
+                for (int i = 0; i < count; ++i) {
+                    spout.nextTuple();
+                }
+            } else if (cmd.startsWith("a")) {
+                int n = Integer.parseInt(cmd.substring(1));
+                Pair<Object, List<Object>> item = collector.items.get(n);
+                spout.ack(item.getKey());
+            } else if (cmd.startsWith("f")) {
+                int n = Integer.parseInt(cmd.substring(1));
+                Pair<Object, List<Object>> item = collector.items.get(n);
+                spout.fail(item.getKey());
+            }
+        }
+        return collector.lines;
+    }
+
+    private void createTextFile(Path file, int lineCount) throws IOException {
+        FSDataOutputStream os = fs.create(file);
+        for (int i = 0; i < lineCount; i++) {
+            os.writeBytes("line " + i + System.lineSeparator());
+        }
+        os.close();
+    }
+
+    private static class AutoCloseableHdfsSpout implements AutoCloseable {
+
+        private final HdfsSpout spout;
+
+        public AutoCloseableHdfsSpout(HdfsSpout spout) {
+            this.spout = spout;
+        }
+
+        @Override
+        public void close() throws Exception {
+            spout.close();
+        }
+    }
+
+    static class MockCollector extends SpoutOutputCollector {
+        //comma separated offsets
+
+        public ArrayList<String> lines;
+        public ArrayList<Pair<Object, List<Object>>> items;
+
+        public MockCollector() {
+            super(null);
+            lines = new ArrayList<>();
+            items = new ArrayList<>();
+        }
+
+        @Override
+        public List<Integer> emit(List<Object> tuple, Object messageId) {
+            lines.add(tuple.toString());
+            items.add(HdfsUtils.Pair.of(messageId, tuple));
+            return null;
+        }
+
+        @Override
+        public List<Integer> emit(String streamId, List<Object> tuple, Object messageId) {
+            return emit(tuple, messageId);
+        }
+
+        @Override
+        public void emitDirect(int arg0, String arg1, List<Object> arg2, Object arg3) {
+            throw new UnsupportedOperationException("NOT Implemented");
+        }
+
+        @Override
+        public void reportError(Throwable arg0) {
+            throw new UnsupportedOperationException("NOT Implemented");
+        }
+
+        @Override
+        public long getPendingCount() {
+            return 0;
+        }
+    } // class MockCollector
+
+    // Throws IOExceptions for 3rd & 4th call to next(), succeeds on 5th, thereafter
+    // throws ParseException.
Effectively produces 3 lines (1,2 & 3) from each file read + static class MockTextFailingReader extends TextFileReader { + + public static final String[] defaultFields = { "line" }; + int readAttempts = 0; + + public MockTextFailingReader(FileSystem fs, Path file, Map conf) throws IOException { + super(fs, file, conf); + } + + @Override + public List next() throws IOException, ParseException { + readAttempts++; + if (readAttempts == 3 || readAttempts == 4) { + throw new IOException("mock test exception"); + } else if (readAttempts > 5) { + throw new ParseException("mock test exception", null); + } + return super.next(); + } + } + + static class MockTopologyContext extends TopologyContext { + + private final int componentId; + + public MockTopologyContext(int componentId, Map topoConf) { + super(null, topoConf, null, null, null, null, null, null, null, 0, 0, null, null, null, null, null, null, null); + this.componentId = componentId; + } + + @Override + public String getThisComponentId() { + return Integer.toString(componentId); + } + + } + +} diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/TestProgressTracker.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/TestProgressTracker.java new file mode 100644 index 00000000000..fbcd67449d9 --- /dev/null +++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/spout/TestProgressTracker.java @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.spout; + +import java.io.File; +import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import static org.junit.jupiter.api.Assertions.*; + +public class TestProgressTracker { + + @TempDir + public File tempFolder; + public File baseFolder; + private FileSystem fs; + private Configuration conf = new Configuration(); + + @BeforeEach + public void setUp() throws Exception { + fs = FileSystem.getLocal(conf); + } + + @Test + public void testBasic() throws Exception { + ProgressTracker tracker = new ProgressTracker(); + baseFolder = new File(tempFolder, "trackertest"); + baseFolder.mkdir(); + + Path file = new Path(baseFolder.toString() + Path.SEPARATOR + "testHeadTrimming.txt"); + createTextFile(file, 10); + + // create reader and do some checks + TextFileReader reader = new TextFileReader(fs, file, null); + FileOffset pos0 = tracker.getCommitPosition(); + assertNull(pos0); + + TextFileReader.Offset currOffset = reader.getFileOffset(); + assertNotNull(currOffset); + assertEquals(0, currOffset.charOffset); + + // read 1st line and ack + assertNotNull(reader.next()); + TextFileReader.Offset pos1 = reader.getFileOffset(); + tracker.recordAckedOffset(pos1); + + TextFileReader.Offset pos1b = (TextFileReader.Offset) tracker.getCommitPosition(); + assertEquals(pos1, pos1b); + + // read 2nd line and ACK + assertNotNull(reader.next()); + TextFileReader.Offset pos2 = reader.getFileOffset(); + tracker.recordAckedOffset(pos2); + + tracker.dumpState(System.err); + TextFileReader.Offset pos2b = (TextFileReader.Offset) tracker.getCommitPosition(); + assertEquals(pos2, pos2b); + + // read lines 3..7, don't ACK .. 
commit pos should remain same + assertNotNull(reader.next());//3 + TextFileReader.Offset pos3 = reader.getFileOffset(); + assertNotNull(reader.next());//4 + TextFileReader.Offset pos4 = reader.getFileOffset(); + assertNotNull(reader.next());//5 + TextFileReader.Offset pos5 = reader.getFileOffset(); + assertNotNull(reader.next());//6 + TextFileReader.Offset pos6 = reader.getFileOffset(); + assertNotNull(reader.next());//7 + TextFileReader.Offset pos7 = reader.getFileOffset(); + + // now ack msg 5 and check + tracker.recordAckedOffset(pos5); + assertEquals(pos2, tracker.getCommitPosition()); // should remain unchanged @ 2 + tracker.recordAckedOffset(pos4); + assertEquals(pos2, tracker.getCommitPosition()); // should remain unchanged @ 2 + tracker.recordAckedOffset(pos3); + assertEquals(pos5, tracker.getCommitPosition()); // should be at 5 + + tracker.recordAckedOffset(pos6); + assertEquals(pos6, tracker.getCommitPosition()); // should be at 6 + tracker.recordAckedOffset(pos6); // double ack on same msg + assertEquals(pos6, tracker.getCommitPosition()); // should still be at 6 + + tracker.recordAckedOffset(pos7); + assertEquals(pos7, tracker.getCommitPosition()); // should be at 7 + + tracker.dumpState(System.err); + } + + private void createTextFile(Path file, int lineCount) throws IOException { + try (FSDataOutputStream os = fs.create(file)) { + for (int i = 0; i < lineCount; i++) { + os.writeBytes("line " + i + System.lineSeparator()); + } + } + } + +} diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/testing/MiniDFSClusterExtension.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/testing/MiniDFSClusterExtension.java new file mode 100644 index 00000000000..93a0eab106b --- /dev/null +++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/testing/MiniDFSClusterExtension.java @@ -0,0 +1,97 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.storm.hdfs.testing;
+
+import java.io.File;
+import java.util.function.Supplier;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.jupiter.api.extension.AfterEachCallback;
+import org.junit.jupiter.api.extension.BeforeEachCallback;
+import org.junit.jupiter.api.extension.ExtensionContext;
+
+import static org.apache.hadoop.test.GenericTestUtils.DEFAULT_TEST_DATA_DIR;
+import static org.apache.hadoop.test.GenericTestUtils.SYSPROP_TEST_DATA_DIR;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+public class MiniDFSClusterExtension implements BeforeEachCallback, AfterEachCallback {
+
+    private static final String TEST_BUILD_DATA = "test.build.data";
+
+    private final Supplier<Configuration> hadoopConfSupplier;
+    private Configuration hadoopConf;
+    private MiniDFSCluster dfscluster;
+
+    public MiniDFSClusterExtension() {
+        this(() -> new Configuration());
+    }
+
+    public MiniDFSClusterExtension(Supplier<Configuration> hadoopConfSupplier) {
+        this.hadoopConfSupplier = hadoopConfSupplier;
+    }
+
+    public Configuration getHadoopConf() {
+        return hadoopConf;
+    }
+
+    public MiniDFSCluster getDfscluster() {
+        return dfscluster;
+    }
+
+    @Override
+    public void beforeEach(ExtensionContext arg0) throws Exception {
+        System.setProperty(TEST_BUILD_DATA, "target/test/data");
+        hadoopConf = hadoopConfSupplier.get();
+        String tempDir = getTestDir("dfs").getAbsolutePath() + File.separator;
+        hadoopConf.set("hdfs.minidfs.basedir", tempDir);
+        dfscluster = new MiniDFSCluster.Builder(hadoopConf).numDataNodes(3).build();
+        dfscluster.waitActive();
+    }
+
+    @Override
+    public void afterEach(ExtensionContext arg0) throws Exception {
+        dfscluster.shutdown();
+        System.clearProperty(TEST_BUILD_DATA);
+    }
+
+    /**
+     * Get an uncreated directory for tests.
+     * We use this method instead of getTestDir() in Hadoop's GenericTestUtils,
+     * which uses asserts from JUnit 4.
+     * @return the absolute directory for tests. Caller is expected to create it.
+     */
+    public static File getTestDir(String subdir) {
+        return new File(getTestDir(), subdir).getAbsoluteFile();
+    }
+
+    /**
+     * Get the (created) base directory for tests.
+     * @return the absolute directory
+     */
+    public static File getTestDir() {
+        String prop = System.getProperty(SYSPROP_TEST_DATA_DIR, DEFAULT_TEST_DATA_DIR);
+        if (prop.isEmpty()) {
+            // corner case: property is there but empty
+            prop = DEFAULT_TEST_DATA_DIR;
+        }
+        File dir = new File(prop).getAbsoluteFile();
+        dir.mkdirs();
+        assertTrue(dir.exists(), "File " + dir + " should exist");
+        return dir;
+    }
+}
diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/testing/MiniDFSClusterExtensionClassLevel.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/testing/MiniDFSClusterExtensionClassLevel.java
new file mode 100644
index 00000000000..d13208a7c4a
--- /dev/null
+++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/testing/MiniDFSClusterExtensionClassLevel.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.hdfs.testing;
+
+import java.io.File;
+import java.util.function.Supplier;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.jupiter.api.extension.AfterAllCallback;
+import org.junit.jupiter.api.extension.BeforeAllCallback;
+import org.junit.jupiter.api.extension.ExtensionContext;
+
+import static org.apache.storm.hdfs.testing.MiniDFSClusterExtension.getTestDir;
+
+public class MiniDFSClusterExtensionClassLevel implements BeforeAllCallback, AfterAllCallback {
+
+    private static final String TEST_BUILD_DATA = "test.build.data";
+
+    private final Supplier<Configuration> hadoopConfSupplier;
+    private Configuration hadoopConf;
+    private MiniDFSCluster dfscluster;
+
+    public MiniDFSClusterExtensionClassLevel() {
+        this(() -> new Configuration());
+    }
+
+    public MiniDFSClusterExtensionClassLevel(Supplier<Configuration> hadoopConfSupplier) {
+        this.hadoopConfSupplier = hadoopConfSupplier;
+    }
+
+    public Configuration getHadoopConf() {
+        return hadoopConf;
+    }
+
+    public MiniDFSCluster getDfscluster() {
+        return dfscluster;
+    }
+
+    @Override
+    public void beforeAll(ExtensionContext arg0) throws Exception {
+        System.setProperty(TEST_BUILD_DATA, "target/test/data");
+        hadoopConf = hadoopConfSupplier.get();
+        String tempDir = getTestDir("dfs").getAbsolutePath() + File.separator;
+        hadoopConf.set("hdfs.minidfs.basedir", tempDir);
+        dfscluster = new MiniDFSCluster.Builder(hadoopConf).numDataNodes(3).build();
+        dfscluster.waitActive();
+    }
+
+    @Override
+    public void afterAll(ExtensionContext arg0) throws Exception {
+        dfscluster.shutdown();
+        System.clearProperty(TEST_BUILD_DATA);
+    }
+}
diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/HdfsStateTest.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/HdfsStateTest.java
new file mode 100644
index 00000000000..8c02191b4b8
--- /dev/null
+++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/HdfsStateTest.java
@@ -0,0 +1,217 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */ + +package org.apache.storm.hdfs.trident; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.apache.commons.io.FileUtils; +import org.apache.storm.Config; +import org.apache.storm.hdfs.trident.format.DelimitedRecordFormat; +import org.apache.storm.hdfs.trident.format.FileNameFormat; +import org.apache.storm.hdfs.trident.format.RecordFormat; +import org.apache.storm.hdfs.trident.rotation.FileRotationPolicy; +import org.apache.storm.hdfs.trident.rotation.FileSizeRotationPolicy; +import org.apache.storm.trident.tuple.TridentTuple; +import org.apache.storm.tuple.Fields; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + + +public class HdfsStateTest { + + private static final String TEST_OUT_DIR = Paths.get(System.getProperty("java.io.tmpdir"), "trident-unit-test").toString(); + + private static final String FILE_NAME_PREFIX = "hdfs-data-"; + private static final String TEST_TOPOLOGY_NAME = "test-topology"; + private static final String INDEX_FILE_PREFIX = ".index."; + private final TestFileNameFormat fileNameFormat = new TestFileNameFormat(); + + private HdfsState createHdfsState() { + + Fields hdfsFields = new Fields("f1"); + + RecordFormat recordFormat = new DelimitedRecordFormat().withFields(hdfsFields); + + FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB); + + HdfsState.Options options = new HdfsState.HdfsFileOptions() + .withFileNameFormat(fileNameFormat) + .withRecordFormat(recordFormat) + .withRotationPolicy(rotationPolicy) + .withFsUrl("file://" + TEST_OUT_DIR); + + Map conf = new HashMap<>(); + conf.put(Config.TOPOLOGY_NAME, TEST_TOPOLOGY_NAME); + + HdfsState state = new HdfsState(options); + state.prepare(conf, null, 0, 1); + return state; + } + + private List createMockTridentTuples(int count) { + TridentTuple tuple = mock(TridentTuple.class); + when(tuple.getValueByField(anyString())).thenReturn("data"); + List tuples = new ArrayList<>(); + for (int i = 0; i < count; i++) { + tuples.add(tuple); + } + return tuples; + } + + private List getLinesFromCurrentDataFile() throws IOException { + Path dataFile = Paths.get(TEST_OUT_DIR, fileNameFormat.getCurrentFileName()); + return Files.readAllLines(dataFile, Charset.defaultCharset()); + } + + @BeforeEach + public void setUp() { + FileUtils.deleteQuietly(new File(TEST_OUT_DIR)); + } + + @Test + public void testPrepare() { + HdfsState state = createHdfsState(); + Collection files = FileUtils.listFiles(new File(TEST_OUT_DIR), null, false); + File hdfsDataFile = Paths.get(TEST_OUT_DIR, FILE_NAME_PREFIX + "0").toFile(); + assertTrue(files.contains(hdfsDataFile)); + } + + @Test + public void testIndexFileCreation() { + HdfsState state = createHdfsState(); + state.beginCommit(1L); + Collection files = FileUtils.listFiles(new File(TEST_OUT_DIR), null, false); + File hdfsIndexFile = Paths.get(TEST_OUT_DIR, INDEX_FILE_PREFIX + TEST_TOPOLOGY_NAME + ".0").toFile(); + 
assertTrue(files.contains(hdfsIndexFile)); + } + + @Test + public void testUpdateState() throws Exception { + HdfsState state = createHdfsState(); + state.beginCommit(1L); + int tupleCount = 100; + state.updateState(createMockTridentTuples(tupleCount), null); + state.commit(1L); + state.close(); + List lines = getLinesFromCurrentDataFile(); + List expected = new ArrayList<>(); + for (int i = 0; i < tupleCount; i++) { + expected.add("data"); + } + assertEquals(tupleCount, lines.size()); + assertEquals(expected, lines); + } + + @Test + public void testRecoverOneBatch() throws Exception { + HdfsState state = createHdfsState(); + // batch 1 is played with 25 tuples initially. + state.beginCommit(1L); + state.updateState(createMockTridentTuples(25), null); + // batch 1 is replayed with 50 tuples. + int replayBatchSize = 50; + state.beginCommit(1L); + state.updateState(createMockTridentTuples(replayBatchSize), null); + state.commit(1L); + // close the state to force flush + state.close(); + // Ensure that the original batch1 is discarded and new one is persisted. + List lines = getLinesFromCurrentDataFile(); + assertEquals(replayBatchSize, lines.size()); + List expected = new ArrayList<>(); + for (int i = 0; i < replayBatchSize; i++) { + expected.add("data"); + } + assertEquals(expected, lines); + } + + @Test + public void testRecoverMultipleBatches() throws Exception { + HdfsState state = createHdfsState(); + + // batch 1 + int batch1Count = 10; + state.beginCommit(1L); + state.updateState(createMockTridentTuples(batch1Count), null); + state.commit(1L); + + // batch 2 + int batch2Count = 20; + state.beginCommit(2L); + state.updateState(createMockTridentTuples(batch2Count), null); + state.commit(2L); + + // batch 3 + int batch3Count = 30; + state.beginCommit(3L); + state.updateState(createMockTridentTuples(batch3Count), null); + state.commit(3L); + + // batch 3 replayed with 40 tuples + int batch3ReplayCount = 40; + state.beginCommit(3L); + state.updateState(createMockTridentTuples(batch3ReplayCount), null); + state.commit(3L); + state.close(); + /* + * total tuples should be + * recovered (batch-1 + batch-2) + replayed (batch-3) + */ + List lines = getLinesFromCurrentDataFile(); + int preReplayCount = batch1Count + batch2Count + batch3Count; + int expectedTupleCount = batch1Count + batch2Count + batch3ReplayCount; + + assertNotEquals(preReplayCount, lines.size()); + assertEquals(expectedTupleCount, lines.size()); + } + + private static class TestFileNameFormat implements FileNameFormat { + private String currentFileName = ""; + + @Override + public void prepare(Map conf, int partitionIndex, int numPartitions) { + + } + + @Override + public String getName(long rotation, long timeStamp) { + currentFileName = FILE_NAME_PREFIX + rotation; + return currentFileName; + } + + @Override + public String getPath() { + return TEST_OUT_DIR; + } + + public String getCurrentFileName() { + return currentFileName; + } + } +} diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/format/TestSimpleFileNameFormat.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/format/TestSimpleFileNameFormat.java new file mode 100644 index 00000000000..cd40a278ffe --- /dev/null +++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/format/TestSimpleFileNameFormat.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version + * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +package org.apache.storm.hdfs.trident.format; + +import java.net.UnknownHostException; +import java.text.SimpleDateFormat; +import org.apache.storm.utils.Utils; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class TestSimpleFileNameFormat { + + @Test + public void testDefaults() { + SimpleFileNameFormat format = new SimpleFileNameFormat(); + format.prepare(null, 3, 5); + long now = System.currentTimeMillis(); + String path = format.getPath(); + String name = format.getName(1, now); + + assertEquals("/storm", path); + String time = new SimpleDateFormat("yyyyMMddHHmmss").format(now); + assertEquals(time + ".1.txt", name); + } + + @Test + public void testParameters() { + SimpleFileNameFormat format = new SimpleFileNameFormat() + .withName("$TIME.$HOST.$PARTITION.$NUM.txt") + .withPath("/mypath") + .withTimeFormat("yyyy-MM-dd HH:mm:ss"); + format.prepare(null, 3, 5); + long now = System.currentTimeMillis(); + String path = format.getPath(); + String name = format.getName(1, now); + + assertEquals("/mypath", path); + String time = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(now); + String host = null; + try { + host = Utils.localHostname(); + } catch (UnknownHostException e) { + e.printStackTrace(); + } + assertEquals(time + "." + host + ".3.1.txt", name); + } + + @Test + public void testTimeFormat() { + assertThrows(IllegalArgumentException.class, () -> {SimpleFileNameFormat format = new SimpleFileNameFormat() + .withTimeFormat("xyz");}); + } +} diff --git a/external/storm-hdfs/src/test/resources/FixedAvroSerializer.config b/external/storm-hdfs/src/test/resources/FixedAvroSerializer.config new file mode 100644 index 00000000000..971d4115d5d --- /dev/null +++ b/external/storm-hdfs/src/test/resources/FixedAvroSerializer.config @@ -0,0 +1,2 @@ +{"type":"record", "name":"stormtest1", "fields":[{"name":"foo1","type":"string"}, {"name":"int1", "type":"int" }]} +{"type":"record", "name":"stormtest2", "fields":[{"name":"foobar1","type":"string"}, {"name":"intint1", "type":"int" }]} \ No newline at end of file diff --git a/external/storm-hdfs/src/test/resources/log4j.properties b/external/storm-hdfs/src/test/resources/log4j.properties new file mode 100644 index 00000000000..c952abd128f --- /dev/null +++ b/external/storm-hdfs/src/test/resources/log4j.properties @@ -0,0 +1,23 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+log4j.rootLogger = WARN, out
+
+log4j.appender.out = org.apache.log4j.ConsoleAppender
+log4j.appender.out.layout = org.apache.log4j.PatternLayout
+log4j.appender.out.layout.ConversionPattern = %d (%t) [%p - %l] %m%n
\ No newline at end of file
diff --git a/external/storm-hdfs/src/test/resources/log4j2.xml b/external/storm-hdfs/src/test/resources/log4j2.xml
new file mode 100755
index 00000000000..546b1b38086
--- /dev/null
+++ b/external/storm-hdfs/src/test/resources/log4j2.xml
@@ -0,0 +1,32 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/external/storm-jdbc/README.md b/external/storm-jdbc/README.md
new file mode 100644
index 00000000000..d1b9d5a6997
--- /dev/null
+++ b/external/storm-jdbc/README.md
@@ -0,0 +1,302 @@
+# Storm JDBC
+Storm/Trident integration for JDBC. This package includes the core bolts and trident states that allow a storm topology
+to either insert storm tuples into a database table or to execute select queries against a database and enrich tuples
+in a storm topology.
+
+**Note**: Throughout the examples below, we make use of com.google.common.collect.Lists and com.google.common.collect.Maps.
+
+## Inserting into a database.
+The bolt and trident state included in this package for inserting data into a database table are tied to a single table.
+
+### ConnectionProvider
+An interface that should be implemented by different connection pooling mechanisms: `org.apache.storm.jdbc.common.ConnectionProvider`
+
+```java
+public interface ConnectionProvider extends Serializable {
+    /**
+     * method must be idempotent.
+     */
+    void prepare();
+
+    /**
+     *
+     * @return a DB connection over which the queries can be executed.
+     */
+    Connection getConnection();
+
+    /**
+     * called once when the system is shutting down, should be idempotent.
+     */
+    void cleanup();
+}
+```
+
+Out of the box we support `org.apache.storm.jdbc.common.HikariCPConnectionProvider`, which is an implementation that uses HikariCP.
+
+### JdbcMapper
+The main API for inserting data in a table using JDBC is the `org.apache.storm.jdbc.mapper.JdbcMapper` interface:
+
+```java
+public interface JdbcMapper extends Serializable {
+    List<Column> getColumns(ITuple tuple);
+}
+```
+
+The `getColumns()` method defines how a storm tuple maps to a list of columns representing a row in a database.
+**The order of the returned list is important. The place holders in the supplied queries are resolved in the same order as the returned list.**
+For example, if the user-supplied insert query is `insert into user(user_id, user_name, create_date) values (?,?, now())`, the 1st item
+of the list returned by `getColumns` will map to the 1st place holder, the 2nd to the 2nd, and so on. We do not parse
+the supplied queries to try and resolve place holders by column names. Not making any assumptions about the query syntax allows this connector
+to be used with some non-standard SQL frameworks like Phoenix, which only supports upsert into.
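+
+As an illustration (not part of this package), a hand-written `JdbcMapper` could look like the sketch below. The `UserRowMapper` class name, the tuple field names and the target `user` table are hypothetical; the only contract is that the returned columns line up, in order, with the `?` place holders of the insert query.
+
+```java
+// Hypothetical mapper for: insert into user (user_id, user_name) values (?,?)
+public class UserRowMapper implements JdbcMapper {
+    @Override
+    public List<Column> getColumns(ITuple tuple) {
+        List<Column> columns = new ArrayList<>();
+        // Order matters: the 1st column binds to the 1st '?', the 2nd to the 2nd.
+        columns.add(new Column<>("user_id", tuple.getIntegerByField("user_id"), java.sql.Types.INTEGER));
+        columns.add(new Column<>("user_name", tuple.getStringByField("user_name"), java.sql.Types.VARCHAR));
+        return columns;
+    }
+}
+```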
+
+### JdbcInsertBolt
+To use the `JdbcInsertBolt`, you construct an instance of it by specifying a `ConnectionProvider` implementation
+and a `JdbcMapper` implementation that converts a storm tuple to a DB row. In addition, you must either supply
+a table name using the `withTableName` method or an insert query using `withInsertQuery`.
+If you specify an insert query, you should ensure that your `JdbcMapper` implementation returns a list of columns in the same order as in your insert query.
+You can optionally specify a query timeout seconds param that specifies the max seconds an insert query can take.
+The default is the value of topology.message.timeout.secs, and a value of -1 indicates that no query timeout should be set.
+You should set the query timeout value to be <= topology.message.timeout.secs.
+
+```java
+Map hikariConfigMap = Maps.newHashMap();
+hikariConfigMap.put("dataSourceClassName","com.mysql.jdbc.jdbc2.optional.MysqlDataSource");
+hikariConfigMap.put("dataSource.url", "jdbc:mysql://localhost/test");
+hikariConfigMap.put("dataSource.user","root");
+hikariConfigMap.put("dataSource.password","password");
+ConnectionProvider connectionProvider = new HikariCPConnectionProvider(hikariConfigMap);
+
+String tableName = "user_details";
+JdbcMapper simpleJdbcMapper = new SimpleJdbcMapper(tableName, connectionProvider);
+
+JdbcInsertBolt userPersistenceBolt = new JdbcInsertBolt(connectionProvider, simpleJdbcMapper)
+                                    .withTableName("user")
+                                    .withQueryTimeoutSecs(30);
+Or
+JdbcInsertBolt userPersistenceBolt = new JdbcInsertBolt(connectionProvider, simpleJdbcMapper)
+                                    .withInsertQuery("insert into user values (?,?)")
+                                    .withQueryTimeoutSecs(30);
+```
+
+### SimpleJdbcMapper
+`storm-jdbc` includes a general purpose `JdbcMapper` implementation called `SimpleJdbcMapper` that can map a storm
+tuple to a database row. `SimpleJdbcMapper` assumes that the storm tuple has fields with the same names as the columns in
+the database table that you intend to write to.
+
+To use `SimpleJdbcMapper`, you simply tell it the tableName that you want to write to and provide a connectionProvider instance.
+
+The following code creates a `SimpleJdbcMapper` instance that:
+
+1. Will allow the mapper to transform a storm tuple to a list of columns mapping to a row in table test.user_details.
+2. Will use the provided HikariCP configuration to establish a connection pool with the specified database configuration and
+automatically figure out the column names and corresponding data types of the table that you intend to write to.
+Please see https://github.com/brettwooldridge/HikariCP#configuration-knobs-baby to learn more about hikari configuration properties.
+
+```java
+Map hikariConfigMap = Maps.newHashMap();
+hikariConfigMap.put("dataSourceClassName","com.mysql.jdbc.jdbc2.optional.MysqlDataSource");
+hikariConfigMap.put("dataSource.url", "jdbc:mysql://localhost/test");
+hikariConfigMap.put("dataSource.user","root");
+hikariConfigMap.put("dataSource.password","password");
+ConnectionProvider connectionProvider = new HikariCPConnectionProvider(hikariConfigMap);
+String tableName = "user_details";
+JdbcMapper simpleJdbcMapper = new SimpleJdbcMapper(tableName, connectionProvider);
+```
+The mapper initialized in the example above assumes a storm tuple has values for all the columns of the table you intend to insert data into, and its `getColumns`
+method will return the columns in the order in which the JDBC connection's `connection.getMetaData().getColumns()` method returns them.
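+
+For context, a minimal sketch of wiring the insert bolt into a topology (the `userSpout` component and the component names are hypothetical):
+
+```java
+// Hypothetical wiring: a spout emitting "user_id" and "user_name" fields
+// feeding the JdbcInsertBolt configured above.
+TopologyBuilder builder = new TopologyBuilder();
+builder.setSpout("userSpout", userSpout);
+builder.setBolt("userPersistenceBolt", userPersistenceBolt, 1)
+       .shuffleGrouping("userSpout");
+```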
+
+**If you specified your own insert query to `JdbcInsertBolt`, you must initialize `SimpleJdbcMapper` with an explicit column schema such that the schema lists the columns in the same order as your insert query.**
+For example, if your insert query is `Insert into user (user_id, user_name) values (?,?)`, then your `SimpleJdbcMapper` should be initialized with the following statements:
+```java
+List<Column> columnSchema = Lists.newArrayList(
+    new Column("user_id", java.sql.Types.INTEGER),
+    new Column("user_name", java.sql.Types.VARCHAR));
+JdbcMapper simpleJdbcMapper = new SimpleJdbcMapper(columnSchema);
+```
+
+If your storm tuple only has fields for a subset of columns, i.e. if some of the columns in your table have default values and you want to insert values only for the columns with no default values, you can enforce that behavior by initializing the
+`SimpleJdbcMapper` with an explicit column schema. For example, if you have a user_details table `create table if not exists user_details (user_id integer, user_name varchar(100), dept_name varchar(100), create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP);`,
+the create_time column has a default value. To ensure only the columns with no default values are inserted,
+you can initialize the `jdbcMapper` as below:
+
+```java
+List<Column> columnSchema = Lists.newArrayList(
+    new Column("user_id", java.sql.Types.INTEGER),
+    new Column("user_name", java.sql.Types.VARCHAR),
+    new Column("dept_name", java.sql.Types.VARCHAR));
+JdbcMapper simpleJdbcMapper = new SimpleJdbcMapper(columnSchema);
+```
+### JdbcTridentState
+We also support a trident persistent state that can be used with trident topologies. To create a jdbc persistent trident
+state you need to initialize it with the table name or an insert query, a JdbcMapper instance and a connection provider instance.
+See the example below:
+
+```java
+JdbcState.Options options = new JdbcState.Options()
+        .withConnectionProvider(connectionProvider)
+        .withMapper(jdbcMapper)
+        .withTableName("user_details")
+        .withQueryTimeoutSecs(30);
+JdbcStateFactory jdbcStateFactory = new JdbcStateFactory(options);
+```
+Similar to `JdbcInsertBolt`, you can specify a custom insert query using `withInsertQuery` instead of specifying a table name.
+
+## Lookup from Database
+We support `select` queries from databases to allow enrichment of storm tuples in a topology. The main API for
+executing select queries against a database using JDBC is the `org.apache.storm.jdbc.mapper.JdbcLookupMapper` interface:
+
+```java
+    void declareOutputFields(OutputFieldsDeclarer declarer);
+    List<Column> getColumns(ITuple tuple);
+    List<Values> toTuple(ITuple input, List<Column> columns);
+```
+
+The `declareOutputFields` method is used to indicate what fields will be emitted as part of the output tuple when processing a storm
+tuple.
+
+The `getColumns` method specifies the place holder columns in a select query, along with their SQL type and the value to use.
+For example, in the user_details table mentioned above, if you were executing a query `select user_name from user_details where
+user_id = ? and create_time > ?`, the `getColumns` method would take a storm input tuple and return a List containing two items.
+The first instance of `Column` type's `getValue()` method will be used as the value of `user_id` to look up, and the
+second instance of `Column` type's `getValue()` method will be used as the value of `create_time`.
+**Note: the order in the returned list determines the place holder's value.
In other words, the first item in the list maps
+to the first `?` in the select query, the second item to the second `?`, and so on.**
+
+The `toTuple` method takes in the input tuple and a list of columns representing a DB row as a result of the select query
+and returns a list of values to be emitted.
+**Please note that it returns a list of `Values` and not just a single instance of `Values`.**
+This allows a single DB row to be mapped to multiple output storm tuples.
+
+### SimpleJdbcLookupMapper
+`storm-jdbc` includes a general purpose `JdbcLookupMapper` implementation called `SimpleJdbcLookupMapper`.
+
+To use `SimpleJdbcLookupMapper`, you have to initialize it with the fields that will be output by your bolt and the list of
+columns that are used in your select query as place holders. The following example shows initialization of a `SimpleJdbcLookupMapper`
+that declares `user_id,user_name,create_date` as output fields and `user_id` as the place holder column in the select query.
+`SimpleJdbcLookupMapper` assumes the field name in your tuple is equal to the place holder column name, i.e. in our example
+`SimpleJdbcLookupMapper` will look for a field `user_id` in the input tuple and use its value as the place holder's value in the
+select query. For constructing output tuples, it looks for fields specified in `outputFields` in the input tuple first,
+and if a field is not found in the input tuple, it looks at the select query's output row for a column with the same name as the field.
+So in the example below, if the input tuple had fields `user_id, create_date` and the select query was
+`select user_name from user_details where user_id = ?`, then for each input tuple `SimpleJdbcLookupMapper.getColumns(tuple)`
+will return the value of `tuple.getValueByField("user_id")`, which will be used as the value of the `?` in the select query.
+For each output row from the DB, `SimpleJdbcLookupMapper.toTuple()` will use the `user_id, create_date` from the input tuple
+as-is, add only `user_name` from the resulting row, and return these 3 fields as a single output tuple.
+
+```java
+Fields outputFields = new Fields("user_id", "user_name", "create_date");
+List<Column> queryParamColumns = Lists.newArrayList(new Column("user_id", Types.INTEGER));
+this.jdbcLookupMapper = new SimpleJdbcLookupMapper(outputFields, queryParamColumns);
+```
+
+### JdbcLookupBolt
+To use the `JdbcLookupBolt`, construct an instance of it using a `ConnectionProvider` instance, a `JdbcLookupMapper` instance and the select query to execute.
+You can optionally specify a query timeout seconds param that specifies the max seconds the select query can take.
+The default is the value of topology.message.timeout.secs. You should set this value to be <= topology.message.timeout.secs.
+
+```java
+String selectSql = "select user_name from user_details where user_id = ?";
+SimpleJdbcLookupMapper lookupMapper = new SimpleJdbcLookupMapper(outputFields, queryParamColumns);
+JdbcLookupBolt userNameLookupBolt = new JdbcLookupBolt(connectionProvider, selectSql, lookupMapper)
+        .withQueryTimeoutSecs(30);
+```
+
+### JdbcTridentState for lookup
+We also support a trident query state that can be used with trident topologies.
+
+```java
+JdbcState.Options options = new JdbcState.Options()
+        .withConnectionProvider(connectionProvider)
+        .withJdbcLookupMapper(new SimpleJdbcLookupMapper(new Fields("user_name"), Lists.newArrayList(new Column("user_id", Types.INTEGER))))
+        .withSelectQuery("select user_name from user_details where user_id = ?")
+        .withQueryTimeoutSecs(30);
+```
+
+## Example
+A runnable example can be found in the `src/test/java/topology` directory.
+
+### Setup
+* Ensure you have included the JDBC implementation dependency for your chosen database as part of your build configuration.
+* The test topologies execute the following queries, so your intended DB must support these queries for the test topologies
+to work.
+```SQL
+create table if not exists user (user_id integer, user_name varchar(100), dept_name varchar(100), create_date date);
+create table if not exists department (dept_id integer, dept_name varchar(100));
+create table if not exists user_department (user_id integer, dept_id integer);
+insert into department values (1, 'R&D');
+insert into department values (2, 'Finance');
+insert into department values (3, 'HR');
+insert into department values (4, 'Sales');
+insert into user_department values (1, 1);
+insert into user_department values (2, 2);
+insert into user_department values (3, 3);
+insert into user_department values (4, 4);
+select dept_name from department, user_department where department.dept_id = user_department.dept_id and user_department.user_id = ?;
+```
+### Execution
+Run the `org.apache.storm.jdbc.topology.UserPersistenceTopology` class using the storm jar command. The class expects 5 args:
+storm jar org.apache.storm.jdbc.topology.UserPersistenceTopology <dataSourceClassName> <dataSource.url> <user> <password> [topology name]
+
+To make it work with MySQL, you can add the following to the pom.xml
+
+```
+<dependency>
+    <groupId>mysql</groupId>
+    <artifactId>mysql-connector-java</artifactId>
+    <version>5.1.31</version>
+</dependency>
+```
+
+You can generate a single jar with dependencies using the mvn assembly plugin. To use the plugin, add the following to your pom.xml and execute
+`mvn clean compile assembly:single`
+
+```
+<plugin>
+    <artifactId>maven-assembly-plugin</artifactId>
+    <configuration>
+        <archive>
+            <manifest>
+                <mainClass>fully.qualified.MainClass</mainClass>
+            </manifest>
+        </archive>
+        <descriptorRefs>
+            <descriptorRef>jar-with-dependencies</descriptorRef>
+        </descriptorRefs>
+    </configuration>
+</plugin>
+```
+
+MySQL example:
+```
+storm jar ~/repo/incubator-storm/external/storm-jdbc/target/storm-jdbc-0.10.0-SNAPSHOT-jar-with-dependencies.jar org.apache.storm.jdbc.topology.UserPersistenceTopology com.mysql.jdbc.jdbc2.optional.MysqlDataSource jdbc:mysql://localhost/test root password UserPersistenceTopology
+```
+
+You can execute a select query against the user table, which should show the newly inserted rows:
+
+```
+select * from user;
+```
+
+For trident you can view `org.apache.storm.jdbc.topology.UserPersistenceTridentTopology`.
+## License
+
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+
+## Committer Sponsors
+
+## Example
+A runnable example can be found in the `src/test/java/topology` directory.
+
+### Setup
+* Ensure you have included the JDBC implementation dependency for your chosen database as part of your build configuration.
+* The test topologies execute the following queries, so your intended DB must support them for the test topologies
+to work.
+```SQL
+create table if not exists user (user_id integer, user_name varchar(100), dept_name varchar(100), create_date date);
+create table if not exists department (dept_id integer, dept_name varchar(100));
+create table if not exists user_department (user_id integer, dept_id integer);
+insert into department values (1, 'R&D');
+insert into department values (2, 'Finance');
+insert into department values (3, 'HR');
+insert into department values (4, 'Sales');
+insert into user_department values (1, 1);
+insert into user_department values (2, 2);
+insert into user_department values (3, 3);
+insert into user_department values (4, 4);
+select dept_name from department, user_department where department.dept_id = user_department.dept_id and user_department.user_id = ?;
+```
+### Execution
+Run the `org.apache.storm.jdbc.topology.UserPersistenceTopology` class using the storm jar command. The class expects 5 args:
+
+```
+storm jar org.apache.storm.jdbc.topology.UserPersistenceTopology <dataSourceClassName> <dataSource.url> <user> <password> [topology name]
+```
+
+To make it work with MySQL, you can add the following to the pom.xml:
+
+```
+<dependency>
+    <groupId>mysql</groupId>
+    <artifactId>mysql-connector-java</artifactId>
+    <version>5.1.31</version>
+</dependency>
+```
+
+You can generate a single jar with dependencies using the maven assembly plugin. To use the plugin, add the following to your pom.xml and execute
+`mvn clean compile assembly:single`:
+
+```
+<plugin>
+    <artifactId>maven-assembly-plugin</artifactId>
+    <configuration>
+        <archive>
+            <manifest>
+                <mainClass>fully.qualified.MainClass</mainClass>
+            </manifest>
+        </archive>
+        <descriptorRefs>
+            <descriptorRef>jar-with-dependencies</descriptorRef>
+        </descriptorRefs>
+    </configuration>
+</plugin>
+```
+
+MySQL example:
+```
+storm jar ~/repo/incubator-storm/external/storm-jdbc/target/storm-jdbc-0.10.0-SNAPSHOT-jar-with-dependencies.jar org.apache.storm.jdbc.topology.UserPersistenceTopology com.mysql.jdbc.jdbc2.optional.MysqlDataSource jdbc:mysql://localhost/test root password UserPersistenceTopology
+```
+
+You can execute a select query against the user table, which should show the newly inserted rows:
+
+```
+select * from user;
+```
+
+For trident, see `org.apache.storm.jdbc.topology.UserPersistenceTridentTopology`.
+
+## License
+
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+
+## Committer Sponsors
+ * P. Taylor Goetz ([ptgoetz@apache.org](mailto:ptgoetz@apache.org))
+ * Sriharsha Chintalapani ([sriharsha@apache.org](mailto:sriharsha@apache.org))
diff --git a/external/storm-jdbc/pom.xml b/external/storm-jdbc/pom.xml
new file mode 100644
index 00000000000..dda214ac014
--- /dev/null
+++ b/external/storm-jdbc/pom.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <artifactId>storm</artifactId>
+        <groupId>org.apache.storm</groupId>
+        <version>2.8.4-SNAPSHOT</version>
+        <relativePath>../../pom.xml</relativePath>
+    </parent>
+
+    <artifactId>storm-jdbc</artifactId>
+
+    <developers>
+        <developer>
+            <id>Parth-Brahmbhatt</id>
+            <name>Parth Brahmbhatt</name>
+            <email>brahmbhatt.parth@gmail.com</email>
+        </developer>
+    </developers>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-client</artifactId>
+            <version>${project.version}</version>
+            <scope>${provided.scope}</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.zaxxer</groupId>
+            <artifactId>HikariCP</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.hsqldb</groupId>
+            <artifactId>hsqldb</artifactId>
+            <version>2.7.4</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>test-jar</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-checkstyle-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-pmd-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/AbstractJdbcBolt.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/AbstractJdbcBolt.java
new file mode 100644
index 00000000000..5368cbefa10
--- /dev/null
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/AbstractJdbcBolt.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.jdbc.bolt;
+
+import java.sql.DriverManager;
+import java.util.Map;
+import org.apache.commons.lang3.Validate;
+import org.apache.storm.Config;
+import org.apache.storm.jdbc.common.ConnectionProvider;
+import org.apache.storm.jdbc.common.JdbcClient;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.base.BaseTickTupleAwareRichBolt;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
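+
+/**
+ * Common base class for the JDBC bolts. It holds the {@link ConnectionProvider},
+ * initializes the shared {@link JdbcClient} in {@code prepare()}, and defaults the
+ * query timeout to the topology message timeout.
+ */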
+public abstract class AbstractJdbcBolt extends BaseTickTupleAwareRichBolt {
+    private static final Logger LOG = LoggerFactory.getLogger(
+            AbstractJdbcBolt.class);
+
+    static {
+        /*
+         * Load DriverManager first to avoid any race condition between the
+         * DriverManager static initialization block and a specific driver
+         * class's static initialization block, e.g. PhoenixDriver.
+         *
+         * This workaround is needed because, even though the prepare() method
+         * is synchronized, a worker can initialize multiple AbstractJdbcBolt
+         * instances, and those initializations can race.
+         *
+         * We just need to ensure that the DriverManager class is always
+         * initialized before the provider, so the line below must run before
+         * the provider is initialized.
+         */
+        DriverManager.getDrivers();
+    }
+
+    protected OutputCollector collector;
+    protected transient JdbcClient jdbcClient;
+    protected String configKey;
+    protected Integer queryTimeoutSecs;
+    protected ConnectionProvider connectionProvider;
+
+    /**
+     * Constructor.
+     *
+     * @param connectionProviderParam database connection provider
+     */
+    public AbstractJdbcBolt(final ConnectionProvider connectionProviderParam) {
+        Validate.notNull(connectionProviderParam);
+        this.connectionProvider = connectionProviderParam;
+    }
+
+    /**
+     * Subclasses should call this to ensure the output collector and connection
+     * provider are set up, and that jdbcClient is initialized properly.
+     *
+     * {@inheritDoc}
+     */
+    @Override
+    public void prepare(final Map<String, Object> map, final TopologyContext topologyContext,
+                        final OutputCollector outputCollector) {
+        this.collector = outputCollector;
+
+        connectionProvider.prepare();
+
+        if (queryTimeoutSecs == null) {
+            // Default the query timeout to the topology message timeout so a
+            // query cannot outlive the tuple that triggered it.
+            String msgTimeout = map.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS)
+                    .toString();
+            queryTimeoutSecs = Integer.parseInt(msgTimeout);
+        }
+
+        this.jdbcClient = new JdbcClient(connectionProvider, queryTimeoutSecs);
+    }
+
+    /**
+     * Cleanup.
+     *
+     * <p>Subclasses should call this to ensure the connection provider is
+     * also cleaned up.
+     */
+    @Override
+    public void cleanup() {
+        connectionProvider.cleanup();
+    }
+}
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/JdbcInsertBolt.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/JdbcInsertBolt.java
new file mode 100644
index 00000000000..16317370871
--- /dev/null
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/JdbcInsertBolt.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.jdbc.bolt;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.Validate;
+import org.apache.storm.jdbc.common.Column;
+import org.apache.storm.jdbc.common.ConnectionProvider;
+import org.apache.storm.jdbc.mapper.JdbcMapper;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Tuple;
+
+/**
+ * Basic bolt for writing to any database table.
+ *
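+ * <p>A sketch of typical construction (the table name and variables here are
+ * illustrative, not part of this class's contract):
+ * <pre><![CDATA[
+ * JdbcMapper mapper = new SimpleJdbcMapper("user", connectionProvider);
+ * JdbcInsertBolt insertBolt = new JdbcInsertBolt(connectionProvider, mapper)
+ *         .withTableName("user")
+ *         .withQueryTimeoutSecs(30);
+ * ]]></pre>
+ *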
+ * <p>Note: Each JdbcInsertBolt defined in a topology is tied to a specific table.
+ */
+public class JdbcInsertBolt extends AbstractJdbcBolt {
+
+    private String tableName;
+    private String insertQuery;
+    private JdbcMapper jdbcMapper;
+
+    public JdbcInsertBolt(ConnectionProvider connectionProvider, JdbcMapper jdbcMapper) {
+        super(connectionProvider);
+
+        Validate.notNull(jdbcMapper);
+        this.jdbcMapper = jdbcMapper;
+    }
+
+    public JdbcInsertBolt withTableName(String tableName) {
+        if (insertQuery != null) {
+            throw new IllegalArgumentException("You can not specify both insertQuery and tableName.");
+        }
+        this.tableName = tableName;
+        return this;
+    }
+
+    public JdbcInsertBolt withInsertQuery(String insertQuery) {
+        if (this.tableName != null) {
+            throw new IllegalArgumentException("You can not specify both insertQuery and tableName.");
+        }
+        this.insertQuery = insertQuery;
+        return this;
+    }
+
+    public JdbcInsertBolt withQueryTimeoutSecs(int queryTimeoutSecs) {
+        this.queryTimeoutSecs = queryTimeoutSecs;
+        return this;
+    }
+
+    @Override
+    public void prepare(Map<String, Object> map, TopologyContext topologyContext, OutputCollector collector) {
+        super.prepare(map, topologyContext, collector);
+        if (StringUtils.isBlank(tableName) && StringUtils.isBlank(insertQuery)) {
+            throw new IllegalArgumentException("You must supply either a tableName or an insert query.");
+        }
+    }
+
+    @Override
+    protected void process(Tuple tuple) {
+        try {
+            List<Column> columns = jdbcMapper.getColumns(tuple);
+            List<List<Column>> columnLists = new ArrayList<List<Column>>();
+            columnLists.add(columns);
+            if (!StringUtils.isBlank(tableName)) {
+                this.jdbcClient.insert(this.tableName, columnLists);
+            } else {
+                this.jdbcClient.executeInsertQuery(this.insertQuery, columnLists);
+            }
+            this.collector.ack(tuple);
+        } catch (Exception e) {
+            this.collector.reportError(e);
+            this.collector.fail(tuple);
+        }
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
+        // No output fields: this bolt is a sink that only acks or fails tuples.
+    }
+}
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/JdbcLookupBolt.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/JdbcLookupBolt.java
new file mode 100644
index 00000000000..ae0610c8435
--- /dev/null
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/JdbcLookupBolt.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.jdbc.bolt;
+
+import java.util.List;
+import org.apache.commons.lang3.Validate;
+import org.apache.storm.jdbc.common.Column;
+import org.apache.storm.jdbc.common.ConnectionProvider;
+import org.apache.storm.jdbc.mapper.JdbcLookupMapper;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Basic bolt for querying from any database.
+ */
+public class JdbcLookupBolt extends AbstractJdbcBolt {
+    private static final Logger LOG = LoggerFactory.getLogger(JdbcLookupBolt.class);
+
+    private String selectQuery;
+
+    private JdbcLookupMapper jdbcLookupMapper;
+
+    public JdbcLookupBolt(ConnectionProvider connectionProvider, String selectQuery, JdbcLookupMapper jdbcLookupMapper) {
+        super(connectionProvider);
+
+        Validate.notNull(selectQuery);
+        Validate.notNull(jdbcLookupMapper);
+
+        this.selectQuery = selectQuery;
+        this.jdbcLookupMapper = jdbcLookupMapper;
+    }
+
+    public JdbcLookupBolt withQueryTimeoutSecs(int queryTimeoutSecs) {
+        this.queryTimeoutSecs = queryTimeoutSecs;
+        return this;
+    }
+
+    @Override
+    protected void process(Tuple tuple) {
+        try {
+            List<Column> columns = jdbcLookupMapper.getColumns(tuple);
+            List<List<Column>> result = jdbcClient.select(this.selectQuery, columns);
+
+            if (result != null && result.size() != 0) {
+                for (List<Column> row : result) {
+                    List<Values> values = jdbcLookupMapper.toTuple(tuple, row);
+                    for (Values value : values) {
+                        // Anchor emitted tuples to the input tuple so downstream
+                        // failures replay the lookup.
+                        collector.emit(tuple, value);
+                    }
+                }
+            }
+            this.collector.ack(tuple);
+        } catch (Exception e) {
+            this.collector.reportError(e);
+            this.collector.fail(tuple);
+        }
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
+        jdbcLookupMapper.declareOutputFields(outputFieldsDeclarer);
+    }
+}
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/common/Column.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/common/Column.java
new file mode 100644
index 00000000000..0b3498e543c
--- /dev/null
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/common/Column.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.jdbc.common;
+
+import java.io.Serializable;
+
+/**
+ * A database table can be defined as a list of rows and each row can be defined as a list of columns where
+ * each column instance has a name, a value and a type. This class represents an instance of a column in a database
+ * row. For example, if we have the following table named user:
+ * <pre>
+ *  ____________________________
+ * |    UserId  |   UserName    |
+ * |      1     |    Foo        |
+ * |      2     |    Bar        |
+ *  ----------------------------
+ * </pre>
+ *
+ * <p>The following class can be used to represent the data in the table as
+ * <pre><![CDATA[
+ * List<List<Column>> rows = new ArrayList<List<Column>>();
+ * List<Column> row1 = Lists.newArrayList(new Column("UserId", 1, Types.INTEGER), new Column("UserName", "Foo", Types.VARCHAR));
+ * List<Column> row2 = Lists.newArrayList(new Column("UserId", 2, Types.INTEGER), new Column("UserName", "Bar", Types.VARCHAR));
+ *
+ * rows.add(row1);
+ * rows.add(row2);
+ * ]]></pre>
+ */
+public class Column<T> implements Serializable {
+
+    private String columnName;
+    private T val;
+
+    /**
+     * The sql type (e.g. varchar, date, int). Ideally we would have an enum, but Java's JDBC API uses integers.
+     * See {@link java.sql.Types}.
+     */
+    private int sqlType;
+
+    public Column(String columnName, T val, int sqlType) {
+        this.columnName = columnName;
+        this.val = val;
+        this.sqlType = sqlType;
+    }
+
+    public Column(String columnName, int sqlType) {
+        this.columnName = columnName;
+        this.sqlType = sqlType;
+    }
+
+    public String getColumnName() {
+        return columnName;
+    }
+
+    public T getVal() {
+        return val;
+    }
+
+    public int getSqlType() {
+        return sqlType;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (!(o instanceof Column)) {
+            return false;
+        }
+
+        Column<?> column = (Column<?>) o;
+
+        if (sqlType != column.sqlType) {
+            return false;
+        }
+        if (!columnName.equals(column.columnName)) {
+            return false;
+        }
+        return val != null ? val.equals(column.val) : column.val == null;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = columnName.hashCode();
+        result = 31 * result + (val != null ? val.hashCode() : 0);
+        result = 31 * result + sqlType;
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "Column{"
+                + "columnName='" + columnName + '\''
+                + ", val=" + val
+                + ", sqlType=" + sqlType
+                + '}';
+    }
+}
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/common/ConnectionProvider.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/common/ConnectionProvider.java
new file mode 100644
index 00000000000..39b3ddfc74e
--- /dev/null
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/common/ConnectionProvider.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.jdbc.common;
+
+import java.io.Serializable;
+import java.sql.Connection;
+
+/**
+ * Provides a database connection.
+ */
+public interface ConnectionProvider extends Serializable {
+    /**
+     * Prepare the provider for use. This method must be idempotent.
+     */
+    void prepare();
+
+    /**
+     * Get connection.
+     *
+     * @return a DB connection over which the queries can be executed.
+     */
+    Connection getConnection();
+
+    /**
+     * Called once when the system is shutting down; should be idempotent.
+     */
+    void cleanup();
+}
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/common/HikariCPConnectionProvider.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/common/HikariCPConnectionProvider.java
new file mode 100644
index 00000000000..ad1b80d0271
--- /dev/null
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/common/HikariCPConnectionProvider.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.jdbc.common;
+
+import com.zaxxer.hikari.HikariConfig;
+import com.zaxxer.hikari.HikariDataSource;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.Map;
+import java.util.Properties;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
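+/**
+ * {@link ConnectionProvider} implementation backed by a HikariCP connection pool.
+ *
+ * <p>A sketch of a typical configuration map (keys follow HikariCP property naming;
+ * the values shown are illustrative):
+ * <pre><![CDATA[
+ * Map<String, Object> configMap = new HashMap<>();
+ * configMap.put("dataSourceClassName", "com.mysql.jdbc.jdbc2.optional.MysqlDataSource");
+ * configMap.put("dataSource.url", "jdbc:mysql://localhost/test");
+ * configMap.put("dataSource.user", "root");
+ * configMap.put("dataSource.password", "password");
+ * ConnectionProvider connectionProvider = new HikariCPConnectionProvider(configMap);
+ * ]]></pre>
+ */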
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
+public class HikariCPConnectionProvider implements ConnectionProvider {
+    private static final Logger LOG = LoggerFactory.getLogger(HikariCPConnectionProvider.class);
+
+    private Map<String, Object> configMap;
+    private transient HikariDataSource dataSource;
+
+    public HikariCPConnectionProvider(Map<String, Object> configMap) {
+        this.configMap = configMap;
+    }
+
+    @Override
+    public synchronized void prepare() {
+        if (dataSource == null) {
+            Properties properties = new Properties();
+            properties.putAll(configMap);
+            HikariConfig config = new HikariConfig(properties);
+            if (properties.containsKey("dataSource.url")) {
+                LOG.info("DataSource Url: " + properties.getProperty("dataSource.url"));
+            } else if (config.getJdbcUrl() != null) {
+                LOG.info("JDBC Url: " + config.getJdbcUrl());
+            }
+            // Commits are managed explicitly by JdbcClient, so disable auto-commit on the pool.
+            config.setAutoCommit(false);
+            this.dataSource = new HikariDataSource(config);
+        }
+    }
+
+    @Override
+    public Connection getConnection() {
+        try {
+            return this.dataSource.getConnection();
+        } catch (SQLException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    @Override
+    public void cleanup() {
+        if (dataSource != null) {
+            dataSource.close();
+        }
+    }
+}
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/common/JdbcClient.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/common/JdbcClient.java
new file mode 100644
index 00000000000..29cc85cb5d8
--- /dev/null
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/common/JdbcClient.java
@@ -0,0 +1,266 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+package org.apache.storm.jdbc.common;
+
+import com.google.common.base.Function;
+import com.google.common.base.Joiner;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.Lists;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.Date;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class JdbcClient {
+    private static final Logger LOG = LoggerFactory.getLogger(JdbcClient.class);
+
+    private ConnectionProvider connectionProvider;
+    private int queryTimeoutSecs;
+
+    public JdbcClient(ConnectionProvider connectionProvider, int queryTimeoutSecs) {
+        this.connectionProvider = connectionProvider;
+        this.queryTimeoutSecs = queryTimeoutSecs;
+    }
+
+    public void insert(String tableName, List<List<Column>> columnLists) {
+        String query = constructInsertQuery(tableName, columnLists);
+        executeInsertQuery(query, columnLists);
+    }
+
+    public void executeInsertQuery(String query, List<List<Column>> columnLists) {
+        Exception insertException = null;
+        Connection connection = null;
+        try {
+            connection = connectionProvider.getConnection();
+            boolean autoCommit = connection.getAutoCommit();
+            if (autoCommit) {
+                connection.setAutoCommit(false);
+            }
+
+            LOG.debug("Executing query {}", query);
+
+            try (PreparedStatement preparedStatement = connection.prepareStatement(query)) {
+                if (queryTimeoutSecs > 0) {
+                    preparedStatement.setQueryTimeout(queryTimeoutSecs);
+                }
+
+                for (List<Column> columnList : columnLists) {
+                    setPreparedStatementParams(preparedStatement, columnList);
+                    preparedStatement.addBatch();
+                }
+
+                int[] results = preparedStatement.executeBatch();
+                // Check each element of the batch result: Arrays.asList(results) would wrap
+                // the whole int[] as a single list element, so contains() could never match.
+                if (Arrays.stream(results).anyMatch(result -> result == Statement.EXECUTE_FAILED)) {
+                    connection.rollback();
+                    throw new RuntimeException("failed at least one sql statement in the batch, operation rolled back.");
+                } else {
+                    try {
+                        connection.commit();
+                    } catch (SQLException e) {
+                        throw new RuntimeException("Failed to commit insert query " + query, e);
+                    }
+                }
+            }
+        } catch (SQLException e) {
+            insertException = new RuntimeException("Failed to execute insert query " + query, e);
+        } catch (RuntimeException e) {
+            insertException = e;
+        } finally {
+            closeConnection(connection, insertException);
+        }
+    }
+
+    private String constructInsertQuery(String tableName, List<List<Column>> columnLists) {
+        StringBuilder sb = new StringBuilder();
+        sb.append("Insert into ").append(tableName).append(" (");
+        Collection<String> columnNames = Collections2.transform(columnLists.get(0), new Function<Column, String>() {
+            @Override
+            public String apply(Column input) {
+                return input.getColumnName();
+            }
+        });
+        String columns = Joiner.on(",").join(columnNames);
+        sb.append(columns).append(") values ( ");
+
+        String placeHolders = StringUtils.chop(StringUtils.repeat("?,", columnNames.size()));
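+        // For example, with columns (user_id, user_name) the completed query is:
+        //   Insert into <tableName> (user_id,user_name) values ( ?,?)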
+        sb.append(placeHolders).append(")");
+
+        return sb.toString();
+    }
+
+    public List<List<Column>> select(String sqlQuery, List<Column> queryParams) {
+        Exception selectException = null;
+        Connection connection = null;
+        try {
+            connection = connectionProvider.getConnection();
+            try (PreparedStatement preparedStatement = connection.prepareStatement(sqlQuery)) {
+                if (queryTimeoutSecs > 0) {
+                    preparedStatement.setQueryTimeout(queryTimeoutSecs);
+                }
+                setPreparedStatementParams(preparedStatement, queryParams);
+                try (ResultSet resultSet = preparedStatement.executeQuery()) {
+                    List<List<Column>> rows = Lists.newArrayList();
+                    while (resultSet.next()) {
+                        ResultSetMetaData metaData = resultSet.getMetaData();
+                        int columnCount = metaData.getColumnCount();
+                        List<Column> row = Lists.newArrayList();
+                        for (int i = 1; i <= columnCount; i++) {
+                            String columnLabel = metaData.getColumnLabel(i);
+                            int columnType = metaData.getColumnType(i);
+                            Class columnJavaType = Util.getJavaType(columnType);
+                            if (columnJavaType.equals(String.class)) {
+                                row.add(new Column<String>(columnLabel, resultSet.getString(columnLabel), columnType));
+                            } else if (columnJavaType.equals(Integer.class)) {
+                                row.add(new Column<Integer>(columnLabel, resultSet.getInt(columnLabel), columnType));
+                            } else if (columnJavaType.equals(Double.class)) {
+                                row.add(new Column<Double>(columnLabel, resultSet.getDouble(columnLabel), columnType));
+                            } else if (columnJavaType.equals(Float.class)) {
+                                row.add(new Column<Float>(columnLabel, resultSet.getFloat(columnLabel), columnType));
+                            } else if (columnJavaType.equals(Short.class)) {
+                                row.add(new Column<Short>(columnLabel, resultSet.getShort(columnLabel), columnType));
+                            } else if (columnJavaType.equals(Boolean.class)) {
+                                row.add(new Column<Boolean>(columnLabel, resultSet.getBoolean(columnLabel), columnType));
+                            } else if (columnJavaType.equals(byte[].class)) {
+                                row.add(new Column<byte[]>(columnLabel, resultSet.getBytes(columnLabel), columnType));
+                            } else if (columnJavaType.equals(Long.class)) {
+                                row.add(new Column<Long>(columnLabel, resultSet.getLong(columnLabel), columnType));
+                            } else if (columnJavaType.equals(Date.class)) {
+                                row.add(new Column<Date>(columnLabel, resultSet.getDate(columnLabel), columnType));
+                            } else if (columnJavaType.equals(Time.class)) {
+                                row.add(new Column