diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
deleted file mode 100644
index add2810b..00000000
--- a/.git-blame-ignore-revs
+++ /dev/null
@@ -1,5 +0,0 @@
-# Scala Steward: Reformat with scalafmt 3.8.1
-9dd9b24328ef9e0200929265b623295bf8f7f6a3
-
-# Scala Steward: Reformat with scalafmt 3.8.6
-12ea712573e8e3e8b26a9c3bb69ad0e921b8cfbb
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
deleted file mode 100644
index dbc2d53a..00000000
--- a/.github/workflows/ci.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: CI
-on:
- push:
- pull_request:
-jobs:
- build-test:
- runs-on: ubuntu-latest
- strategy:
- fail-fast: false
- matrix:
- scala: ['2.12.18', '2.13.12', '3.3.1']
- steps:
- - name: Checkout
- uses: actions/checkout@v2
- - name: Setup Scala
- uses: olafurpg/setup-scala@v10
- with:
- java-version: "adopt@1.11"
- - name: Coursier cache
- uses: coursier/cache-action@v5
- - name: Build and test
- if: ${{ matrix.scala == '3.1.0' }}
- run: sbt ++${{ matrix.scala }} clean test
- - name: Build and test
- if: ${{ matrix.scala != '3.1.0' }}
- run: sbt ++${{ matrix.scala }} clean coverage test coverageReport && bash <(curl -s https://codecov.io/bash)
diff --git a/.github/workflows/doc.yaml b/.github/workflows/doc.yaml
deleted file mode 100644
index 0f285607..00000000
--- a/.github/workflows/doc.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-name: Website
-
-on:
- release:
- types:
- - published
- workflow_dispatch:
-
-jobs:
- publish:
- runs-on: ubuntu-latest
- if: github.event_name != 'pull_request'
- steps:
- - uses: actions/checkout@v2
- with:
- fetch-depth: 0
- - uses: olafurpg/setup-scala@v10
- with:
- java-version: "adopt@1.11"
- - uses: olafurpg/setup-gpg@v3
- - name: Setup GIT user
- uses: fregante/setup-git-user@v1
- - name: Setup Ruby
- uses: ruby/setup-ruby@v1
- with:
- ruby-version: 2.6.6
- bundler-cache: true
- - name: Install Jekyll
- run: |
- gem install sass
- gem install activesupport -v 6.1.4.4
- gem install jekyll -v 4.0.0
- gem install nokogiri -v 1.13.10
- gem install jemoji -v 0.11.1
- gem install jekyll-sitemap -v 1.4.0
- - run: sbt docs/publishMicrosite
- env:
- GITHUB_TOKEN: ${{ secrets.ADMIN_GITHUB_TOKEN }}
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
deleted file mode 100644
index 00b79177..00000000
--- a/.github/workflows/release.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: Release
-on:
- push:
- branches: ['master']
- release:
- types:
- - published
-jobs:
- publish:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout
- uses: actions/checkout@v2
- with:
- fetch-depth: 0
- - name: Setup Scala
- uses: olafurpg/setup-scala@v10
- with:
- java-version: "adopt@1.11"
- - uses: olafurpg/setup-gpg@v3
- - run: sbt ci-release
- env:
- PGP_PASSPHRASE: ${{ secrets.PGP_PASSPHRASE }}
- PGP_SECRET: ${{ secrets.PGP_SECRET }}
- SONATYPE_PASSWORD: ${{ secrets.SONATYPE_PASSWORD }}
- SONATYPE_USERNAME: ${{ secrets.SONATYPE_USERNAME }}
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 61935f2c..00000000
--- a/.gitignore
+++ /dev/null
@@ -1,12 +0,0 @@
-dist/*
-target/
-lib_managed/
-src_managed/
-project/boot/
-project/plugins/project/
-.history
-.cache
-.lib/
-.idea
-examples/externalpyproc/virtualenv
-.bloop/
diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 00000000..e69de29b
diff --git a/.scalafmt.conf b/.scalafmt.conf
deleted file mode 100644
index 463060b8..00000000
--- a/.scalafmt.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-version = 3.8.6
-runner.dialect = "scala213source3"
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 261eeb9e..00000000
--- a/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/README.md b/README.md
deleted file mode 100644
index 977e1b43..00000000
--- a/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# [prox](https://vigoo.github.io/prox)
-
-[](https://codecov.io/gh/vigoo/prox)
-[](http://www.apache.org/licenses/LICENSE-2.0)
-[](https://index.scala-lang.org/vigoo/prox/prox-core)
-
-
-**prox** is a small library that helps you starting system processes and redirecting their input/output/error streams,
-either to files, [fs2](https://github.com/functional-streams-for-scala/fs2) streams or each other.
-
-> :warning: **Version 0.5 is a complete redesign of the library**
-
-See the [project's site](https://vigoo.github.io/prox) for documentation and examples.
-
-
-----
-
-YourKit supports open source projects with innovative and intelligent tools
-for monitoring and profiling Java and .NET applications.
-YourKit is the creator of YourKit Java Profiler,
-YourKit .NET Profiler,
-and YourKit YouMonitor.
diff --git a/api/index.html b/api/index.html
new file mode 100644
index 00000000..0ed6d313
--- /dev/null
+++ b/api/index.html
@@ -0,0 +1,2 @@
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
Ungrouped
diff --git a/api/io/github/vigoo/prox/CommonModule$ProcessLikeConfiguration.html b/api/io/github/vigoo/prox/CommonModule$ProcessLikeConfiguration.html
new file mode 100644
index 00000000..89e16b68
--- /dev/null
+++ b/api/io/github/vigoo/prox/CommonModule$ProcessLikeConfiguration.html
@@ -0,0 +1,26 @@
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
The low level operation to attach an error output to all the processes
+in the group.
The low level operation to attach an error output to all the processes
+in the group.
Use one of the other methods of this trait or the advanced interface
+represented by customizedPerProcess for convenience.
This is the place where the process group's error output type gets
+calculated using the GroupErrorRedirectionType and
+OutputRedirectionType type classes.
+
R
+ Error output grouped redirection type
OR
+ Error output redirection type
E
+ Error output type
target
+ Redirection target
groupErrorRedirectionType
+ Helper for dependent error output type
outputRedirectionType
+ Helper for dependent error output type
returns
+ Returns a new process group with all the error streams redirected and
+ the error redirection capability removed.
The low level operation to attach an output to a process
The low level operation to attach an output to a process
Use one of the other methods of this trait for convenience. This is the
+place where the output type gets calculated with a helper type class
+called OutputRedirectionType which implements the type level
+computation for figuring out O.
+
R
+ Output redirection type
target
+ Redirection target
outputRedirectionType
+ Helper for dependent output type
returns
+ Returns a new process or process group with its output redirected and
+ its output redirection capability removed.
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
The low level operation to attach an output to a process
The low level operation to attach an output to a process
Use one of the other methods of this trait for convenience. This is the
+place where the output type gets calculated with a helper type class
+called OutputRedirectionType which implements the type level
+computation for figuring out O.
+
R
+ Output redirection type
target
+ Redirection target
outputRedirectionType
+ Helper for dependent output type
returns
+ Returns a new process or process group with its output redirected and
+ its output redirection capability removed.
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
The low level operation to attach an error output to all the processes
+in the group.
The low level operation to attach an error output to all the processes
+in the group.
Use one of the other methods of this trait or the advanced interface
+represented by customizedPerProcess for convenience.
This is the place where the process group's error output type gets
+calculated using the GroupErrorRedirectionType and
+OutputRedirectionType type classes.
+
R
+ Error output grouped redirection type
OR
+ Error output redirection type
E
+ Error output type
target
+ Redirection target
groupErrorRedirectionType
+ Helper for dependent error output type
outputRedirectionType
+ Helper for dependent error output type
returns
+ Returns a new process group with all the error streams redirected and
+ the error redirection capability removed.
The low level operation to attach an output to a process.
The low level operation to attach an output to a process.
Use one of the other methods of this trait for convenience. This is the
place where the output type gets calculated with a helper type class
called OutputRedirectionType, which implements the type level
computation for figuring out O.
R
  Output redirection type
target
  Redirection target
outputRedirectionType
  Helper for dependent output type
returns
  Returns a new process or process group with its output redirected and
  its output redirection capability removed.
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
The low level operation to attach an output to a process
The low level operation to attach an output to a process
Use one of the other methods of this trait for convenience. This is the
+place where the output type gets calculated with a helper type class
+called OutputRedirectionType which implements the type level
+computation for figuring out O.
+
R
+ Output redirection type
target
+ Redirection target
outputRedirectionType
+ Helper for dependent output type
returns
+ Returns a new process or process group with its output redirected and
+ its output redirection capability removed.
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
The low level operation to attach an error output to all the processes
+in the group.
The low level operation to attach an error output to all the processes
+in the group.
Use one of the other methods of this trait or the advanced interface
+represented by customizedPerProcess for convenience.
This is the place where the process group's error output type gets
+calculated using the GroupErrorRedirectionType and
+OutputRedirectionType type classes.
+
R
+ Error output grouped redirection type
OR
+ Error output redirection type
E
+ Error output type
target
+ Redirection target
groupErrorRedirectionType
+ Helper for dependent error output type
outputRedirectionType
+ Helper for dependent error output type
returns
+ Returns a new process group with all the error streams redirected and
+ the error redirection capability removed.
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
The low level operation to attach an error output to all the processes
+in the group.
The low level operation to attach an error output to all the processes
+in the group.
Use one of the other methods of this trait or the advanced interface
+represented by customizedPerProcess for convenience.
This is the place where the process group's error output type gets
+calculated using the GroupErrorRedirectionType and
+OutputRedirectionType type classes.
+
R
+ Error output grouped redirection type
OR
+ Error output redirection type
E
+ Error output type
target
+ Redirection target
groupErrorRedirectionType
+ Helper for dependent error output type
outputRedirectionType
+ Helper for dependent error output type
returns
+ Returns a new process group with all the error streams redirected and
+ the error redirection capability removed.
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
Process group is two or more processes attached to each other.
This implements a pipeline of processes. The input of the first process
and the output of the last process are redirectable with the
RedirectableInput and RedirectableOutput traits. The processes are
attached to each other's input/output streams; the pipe between them is
customizable.
The error streams are also redirectable with the RedirectableErrors
trait.
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
Process group is two or more processes attached to each other
Process group is two or more processes attached to each other
This implements a pipeline of processes. The input of the first process
+and the output of the last process is redirectable with the
+RedirectableInput and RedirectableOutput traits. The processes are
+attached to each other's input/output streams, the pipe between them is
+customizable.
The error streams are also redirectable with the RedirectableErrors
+trait.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
The low level operation to attach an error output to a process.
The low level operation to attach an error output to a process.
Use one of the other methods of this trait for convenience. This is the
place where the output type gets calculated with a helper type class
called OutputRedirectionType, which implements the type level
computation for figuring out E.
R
  Error output redirection type
target
  Redirection target
outputRedirectionType
  Helper for dependent error output type
returns
  Returns a new process with its error output redirected and its error
  redirection capability removed.
The low level operation to attach an output to a process
The low level operation to attach an output to a process
Use one of the other methods of this trait for convenience. This is the
+place where the output type gets calculated with a helper type class
+called OutputRedirectionType which implements the type level
+computation for figuring out O.
+
R
+ Output redirection type
target
+ Redirection target
outputRedirectionType
+ Helper for dependent output type
returns
+ Returns a new process or process group with its output redirected and
+ its output redirection capability removed.
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
The low level operation to attach an output to a process
The low level operation to attach an output to a process
Use one of the other methods of this trait for convenience. This is the
+place where the output type gets calculated with a helper type class
+called OutputRedirectionType which implements the type level
+computation for figuring out O.
+
R
+ Output redirection type
target
+ Redirection target
outputRedirectionType
+ Helper for dependent output type
returns
+ Returns a new process or process group with its output redirected and
+ its output redirection capability removed.
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
The low level operation to attach an error output to a process
The low level operation to attach an error output to a process
Use one of the other methods of this trait for convenience. This is the
+place where the output type gets calculated with a helper type class
+called OutputRedirectionType which implements the type level
+computation for figuring out E.
+
R
+ Error output redirection type
target
+ Redirection target
outputRedirectionType
+ Helper for dependent error output type
returns
+ Returns a new process with its error output redirected and its error
+ redirection capability removed.
The low level operation to attach an output to a process
The low level operation to attach an output to a process
Use one of the other methods of this trait for convenience. This is the
+place where the output type gets calculated with a helper type class
+called OutputRedirectionType which implements the type level
+computation for figuring out O.
+
R
+ Output redirection type
target
+ Redirection target
outputRedirectionType
+ Helper for dependent output type
returns
+ Returns a new process or process group with its output redirected and
+ its output redirection capability removed.
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
The low level operation to attach an output to a process
The low level operation to attach an output to a process
Use one of the other methods of this trait for convenience. This is the
+place where the output type gets calculated with a helper type class
+called OutputRedirectionType which implements the type level
+computation for figuring out O.
+
R
+ Output redirection type
target
+ Redirection target
outputRedirectionType
+ Helper for dependent output type
returns
+ Returns a new process or process group with its output redirected and
+ its output redirection capability removed.
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
The low level operation to attach an error output to a process
The low level operation to attach an error output to a process
Use one of the other methods of this trait for convenience. This is the
+place where the output type gets calculated with a helper type class
+called OutputRedirectionType which implements the type level
+computation for figuring out E.
+
R
+ Error output redirection type
target
+ Redirection target
outputRedirectionType
+ Helper for dependent error output type
returns
+ Returns a new process with its error output redirected and its error
+ redirection capability removed.
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
The low level operation to attach an error output to a process
The low level operation to attach an error output to a process
Use one of the other methods of this trait for convenience. This is the
+place where the output type gets calculated with a helper type class
+called OutputRedirectionType which implements the type level
+computation for figuring out E.
+
R
+ Error output redirection type
target
+ Redirection target
outputRedirectionType
+ Helper for dependent error output type
returns
+ Returns a new process with its error output redirected and its error
+ redirection capability removed.
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
This base trait is always extended with redirection and configuration
capabilities represented by the traits ProcessConfiguration,
RedirectableInput, RedirectableOutput and RedirectableError.
To create a process, use the constructor in the companion object
Process.apply.
The process specification not only encodes the process to be started but
also how its input, output and error streams are redirected and executed.
For this reason the effect type is also bound by the process, not just at
execution time.
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
case class SimpleProcessResult[+O, +E](exitCode: Prox.ProxExitCode, output: O, error: E) extends Prox.ProcessResult[O, E] with Product with Serializable
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
This base trait is always extended with redirection and configuration
+capabilities represented by the traits ProcessConfiguration,
+RedirectableInput, RedirectableOutput and RedirectableError.
To create a process use the constructor in the companion object
+Process.apply.
The process specification not only encodes the process to be started but
+also how its input, output and error streams are redirected and executed.
+For this reason the effect type is also bound by the process, not just at
+execution time.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process group is two or more processes attached to each other
A process group is two or more processes attached to each other
This implements a pipeline of processes. The input of the first process
+and the output of the last process are redirectable with the
+RedirectableInput and RedirectableOutput traits. The processes are
+attached to each other's input/output streams, and the pipe between them
+is customizable.
The error streams are also redirectable with the RedirectableErrors
+trait.
+
This base trait is always extended with redirection and configuration
+capabilities represented by the traits ProcessConfiguration,
+RedirectableInput, RedirectableOutput and RedirectableError.
To create a process, use the constructor in the companion object
+Process.apply.
The process specification not only encodes the process to be started but
+also how its input, output and error streams are redirected and how it is
+executed. For this reason the effect type is also bound by the process,
+not just at execution time.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
Process group is two or more processes attached to each other
Process group is two or more processes attached to each other
This implements a pipeline of processes. The input of the first process
+and the output of the last process is redirectable with the
+RedirectableInput and RedirectableOutput traits. The processes are
+attached to each other's input/output streams, the pipe between them is
+customizable.
The error streams are also redirectable with the RedirectableErrors
+trait.
+
This base trait is always extended with redirection and configuration
+capabilities represented by the traits ProcessConfiguration,
+RedirectableInput, RedirectableOutput and RedirectableError.
To create a process use the constructor in the companion object
+Process.apply.
The process specification not only encodes the process to be started but
+also how its input, output and error streams are redirected and executed.
+For this reason the effect type is also bound by the process, not just at
+execution time.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
The low-level operation to attach an error output to a process
The low-level operation to attach an error output to a process
Use one of the other methods of this trait for convenience. This is the
+place where the output type gets calculated with a helper type class
+called OutputRedirectionType, which implements the type-level
+computation for figuring out E.
+
R
+ Error output redirection type
E
+ Error output type
target
+ Redirection target
outputRedirectionType
+ Helper for dependent error output type
returns
+ Returns a new process with its error output redirected and its error
+ redirection capability removed.
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
The low-level operation to attach an output to a process
The low-level operation to attach an output to a process
Use one of the other methods of this trait for convenience. This is the
+place where the output type gets calculated with a helper type class
+called OutputRedirectionType, which implements the type-level
+computation for figuring out O.
+
R
+ Output redirection type
O
+ Output type
target
+ Redirection target
outputRedirectionType
+ Helper for dependent output type
returns
+ Returns a new process or process group with its output redirected and
+ its output redirection capability removed.
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
Process group is two or more processes attached to each other
Process group is two or more processes attached to each other
This implements a pipeline of processes. The input of the first process
+and the output of the last process is redirectable with the
+RedirectableInput and RedirectableOutput traits. The processes are
+attached to each other's input/output streams, the pipe between them is
+customizable.
The error streams are also redirectable with the RedirectableErrors
+trait.
+
This base trait is always extended with redirection and configuration
+capabilities represented by the traits ProcessConfiguration,
+RedirectableInput, RedirectableOutput and RedirectableError.
To create a process use the constructor in the companion object
+Process.apply.
The process specification not only encodes the process to be started but
+also how its input, output and error streams are redirected and executed.
+For this reason the effect type is also bound by the process, not just at
+execution time.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
A process to be executed is represented by the Process trait. Once it
+has finished running the results are in ProcessResult. We call a group
+of processes attached together a process group, represented by
+ProcessGroup, its result is described by ProcessGroupResult.
Redirection of input, output and error is enabled by the
+RedirectableInput, RedirectableOutput and RedirectableError
+trait for single processes, and the RedirectableErrors trait for process
+groups.
Processes and process groups are executed by a ProcessRunner, the
+default implementation is called JVMProcessRunner.
+
Process group is two or more processes attached to each other
Process group is two or more processes attached to each other
This implements a pipeline of processes. The input of the first process
+and the output of the last process is redirectable with the
+RedirectableInput and RedirectableOutput traits. The processes are
+attached to each other's input/output streams, the pipe between them is
+customizable.
The error streams are also redirectable with the RedirectableErrors
+trait.
+
This base trait is always extended with redirection and configuration
+capabilities represented by the traits ProcessConfiguration,
+RedirectableInput, RedirectableOutput and RedirectableError.
To create a process use the constructor in the companion object
+Process.apply.
The process specification not only encodes the process to be started but
+also how its input, output and error streams are redirected and executed.
+For this reason the effect type is also bound by the process, not just at
+execution time.
+
\ No newline at end of file
diff --git a/docs/docs/docs/blogposts.md b/docs/docs/docs/blogposts.md
deleted file mode 100644
index 7c85db30..00000000
--- a/docs/docs/docs/blogposts.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-layout: docs
-title: Blog posts
----
-# Blog posts
-
-The following series of blog posts are based on the development of `prox`:
-
-- [Part 1 - type level programming](https://vigoo.github.io/posts/2019-02-10-prox-1-types.html)
-- [Part 2 - Akka Streams with Cats Effect](https://vigoo.github.io/posts/2019-03-07-prox-2-io-akkastreams.html)
-- [Part 3 - Effect abstraction and ZIO](https://vigoo.github.io/posts/2019-08-13-prox-3-zio.html)
-- [Part 4 - Simplified redesign](https://vigoo.github.io/posts/2020-08-03-prox-4-simplify.html)
diff --git a/docs/docs/docs/fs2/custom-runners.md b/docs/docs/docs/fs2/custom-runners.md
deleted file mode 100644
index 62ea841f..00000000
--- a/docs/docs/docs/fs2/custom-runners.md
+++ /dev/null
@@ -1,83 +0,0 @@
----
-layout: docs
-title: Custom runners
----
-
-# Customizing the runner
-
-```scala mdoc:invisible
-import cats.effect._
-import cats.Traverse
-import scala.concurrent.ExecutionContext
-import io.github.vigoo.prox._
-
-val prox = ProxFS2[IO]
-import prox._
-```
-
-The _runner_ is responsible for stating the native processes and wiring all the redirections together. The default
-implementation is called `JVMProcessRunner`.
-
-There are use cases when providing a custom runner makes sense. One such use case could be to launch external processes
-within a docker container in case of running on a development machine (for example from tests), while running them directly
-in production, when the whole service is running within the container.
-
-We can implement this scenario by using `JVMProcessRunner` in production and a custom `DockerizedProcessRunner` in tests,
-where we define the latter as follows:
-
-```scala mdoc
-import java.nio.file.Path
-import java.util.UUID
-
-case class DockerImage(name: String)
-
-case class DockerContainer(name: String)
-
-case class DockerProcessInfo[DockerProcessInfo](container: DockerContainer, dockerProcessInfo: DockerProcessInfo)
-
-class DockerizedProcessRunner[Info](processRunner: ProcessRunner[Info],
- mountedDirectory: Path,
- workingDirectory: Path,
- image: DockerImage)
- extends ProcessRunner[DockerProcessInfo[Info]] {
-
- override def startProcess[O, E](process: Process[O, E]): IO[RunningProcess[O, E, DockerProcessInfo[Info]]] = {
- for {
- container <- generateContainerName
- runningProcess <- processRunner
- .startProcess(wrapInDocker(process, container))
- } yield runningProcess.mapInfo(info => DockerProcessInfo(container, info))
- }
-
- override def startProcessGroup[O, E](processGroup: ProcessGroup[O, E]): IO[RunningProcessGroup[O, E, DockerProcessInfo[Info]]] = {
- Traverse[Vector].sequence(processGroup.originalProcesses.toVector.map(key => generateContainerName.map(c => key -> c))).flatMap { keyAndNames =>
- val nameMap = keyAndNames.toMap
- val names = keyAndNames.map(_._2)
- val modifiedProcessGroup = processGroup.map(new ProcessGroup.Mapper[O, E] {
- def mapFirst[P <: Process[fs2.Stream[IO, Byte], E]](process: P): P = wrapInDocker(process, names.head).asInstanceOf[P]
- def mapInnerWithIdx[P <: Process.UnboundIProcess[fs2.Stream[IO, Byte], E]](process: P, idx: Int): P =
- wrapInDocker(process, names(idx)).asInstanceOf[P]
- def mapLast[P <: Process.UnboundIProcess[O, E]](process: P): P = wrapInDocker(process, names.last).asInstanceOf[P]
- })
- processRunner.startProcessGroup(modifiedProcessGroup)
- .map(_.mapInfo { case (key, info) => DockerProcessInfo(nameMap(key), info) })
- }
- }
-
- private def generateContainerName: IO[DockerContainer] =
- IO(DockerContainer(UUID.randomUUID().toString))
-
- private def wrapInDocker[O, E](process: Process[O, E], container: DockerContainer): Process[O, E] = {
- val envVars = process.environmentVariables.flatMap { case (key, value) => List("-e", s"$key=$value") }.toList
- process.withCommand("docker").withArguments(
- "run" ::
- "--name" :: container.name ::
- "-v" :: mountedDirectory.toString ::
- "-w" :: workingDirectory.toString ::
- envVars :::
- List(image.name, process.command) :::
- process.arguments
- )
- }
-}
-```
diff --git a/docs/docs/docs/fs2/customize.md b/docs/docs/docs/fs2/customize.md
deleted file mode 100644
index a78b7e5f..00000000
--- a/docs/docs/docs/fs2/customize.md
+++ /dev/null
@@ -1,73 +0,0 @@
----
-layout: docs
-title: Customizing environment
----
-
-# Customizing the environment
-
-```scala mdoc:invisible
-import cats.effect._
-import scala.concurrent.ExecutionContext
-import io.github.vigoo.prox._
-
-val prox = ProxFS2[IO]
-import prox._
-```
-
-The type returned by the `Process` constructor also implements the `ProcessConfiguration` trait,
-adding three methods that can be used to customize the working environment of the process to be started:
-
-### Working directory
-
-The `in` method can be used to customize the working directory:
-
-```scala mdoc
-import io.github.vigoo.prox.path._
-
-val dir = home / "tmp"
-val proc1 = Process("ls") in dir
-```
-
-Not that `dir` has the type `java.nio.file.Path`, and the `home / tmp` syntax is just a thin
-syntax extension to produce such values.
-
-### Adding environment variables
-
-The `with` method can be used to add environment variables to the process in the following
-way:
-
-```scala mdoc
-val proc2 = Process("echo", List("$TEST")) `with` ("TEST" -> "Hello world")
-```
-
-### Removing environment variables
-
-The subprocess inherits the parent process environment, so it may be necessary to
-_remove_ some already defined environment variables with the `without` method:
-
-```scala mdoc
-val proc3 = Process("echo" , List("$PATH")) `without` "PATH"
-```
-
-### Writing reusable functions
-
-Because these methods are part of the `ProcessConfiguration` _capability_, writing reusable functions require us to define
-a polymorphic function that requires this capability:
-
-```scala mdoc
-import java.nio.file.Path
-
-def withHome[P <: ProcessLike with ProcessLikeConfiguration](home: Path, proc: P): P#Self =
- proc `with` ("HOME" -> home.toString)
-```
-
-Then we can use it on any kind of process or process group (read about [redirection](redirection) to understand
-why there are multiple concrete process types):
-
-```scala mdoc
-val proc4 = Process("echo", List("$HOME"))
-val proc5 = withHome(home, proc4)
-
-val group1 = Process("grep", List("ERROR")) | Process("sort")
-val group2 = withHome(home, group1)
-```
diff --git a/docs/docs/docs/fs2/index.md b/docs/docs/docs/fs2/index.md
deleted file mode 100644
index 30a3eece..00000000
--- a/docs/docs/docs/fs2/index.md
+++ /dev/null
@@ -1,88 +0,0 @@
----
-layout: docs
-title: Getting started
----
-
-# Getting started with prox
-
-First add one of the `prox` interfaces as a dependency:
-
-```sbt
-libraryDependencies += "io.github.vigoo" %% "prox-fs2" % "0.7.3"
-```
-
-or for Cats Effect 3.x / FS2 3.x:
-
-```sbt
-libraryDependencies += "io.github.vigoo" %% "prox-fs2-3" % "0.7.3"
-```
-
-and, assuming that we have a long living `Blocker` thread pool defined already, we can create
-the `Prox` module:
-
-```scala mdoc:invisible
-import cats.effect._
-import scala.concurrent.ExecutionContext
-import io.github.vigoo.prox._
-```
-
-```scala mdoc
-val prox = ProxFS2[IO]
-import prox._
-```
-
-We require `F` to implement the `Concurrent` type class, and for that we have to have an implicit
-_context shifter_ in scope (this should be already available in an application using cats-effect).
-
-### Defining a process to run
-In prox a process to be executed is defined by a pure value which implements the `Process[O, E]` trait.
-The type parameters have the following meaning:
-
-- `O` is the type of the output value after the system process has finished running
-- `E` is the type of the error output value after the system process has finished running
-
-To create a simple process to be executed use the `Process` constructor:
-
-```scala mdoc
-val proc1 = Process("ls", List("-hal"))
-```
-
-or we can use the _string interpolator_:
-
-```scala mdoc
-val proc2 = proc"ls -hal"
-```
-
-Then we can
-- [customize the process execution](customize) by for example setting environment variables and working directory
-- and [redirect the input, output and error](redirection) channels of the process
-- [pipe two or more processes together](processgroups)
-
-still staying on purely specification level.
-
-### Running the process
-
-Once we have our process specification ready, we can _start_ the process with one of the
-IO functions on process.
-
-But for this we first have to have a `ProcessRunner` implementation in scope. The default
-one is called `JVMProcessRunner` and it can be created in the following way:
-
-```scala mdoc:silent
-implicit val runner: ProcessRunner[JVMProcessInfo] = new JVMProcessRunner
-```
-
-Read the [custom process runners](custom-runners) page for an example of using a customized runner.
-
-With the runner in place we can use [several methods to start the process](running).
-The simplest one is called `run` and it blocks the active thread until the process finishes
-running:
-
-```scala mdoc
-proc1.run()
-```
-
-The result of this IO action is a `ProcessResult[O, E]`, with the ability to observe the
-_exit code_ and the redirected output and error values. In our first example both `O` and
-`E` were `Unit` because the default is to redirect output and error to the _standard output_ and
-_standard error_ streams.
diff --git a/docs/docs/docs/fs2/processgroups.md b/docs/docs/docs/fs2/processgroups.md
deleted file mode 100644
index 64945a6f..00000000
--- a/docs/docs/docs/fs2/processgroups.md
+++ /dev/null
@@ -1,52 +0,0 @@
----
-layout: docs
-title: Process groups
----
-
-# Connecting processes together via pipes
-```scala mdoc:invisible
-import cats.effect._
-import scala.concurrent.ExecutionContext
-import io.github.vigoo.prox._
-
-val prox = ProxFS2[IO]
-import prox._
-```
-
-Connecting one process to another means that the standard output of the first process
-gets redirected to the standard input of the second process. This is implemented using
-the redirection capabilities described [on the redirection page](redirection). The result
-of connecting one process to another is called a _process group_ and it implements the
-trait `ProcessGroup[O, E]`.
-
-To create a process group, either:
-- Use the `|` or `via` methods between two **unbounded** processes
-- Use the `|` or `via` methods between an **unbounded** process group and an **unbounded** process
-
-It is important that the process group construction must always happen before any redirection,
-the type system enforces this by requiring the involved processes to be `UnboundedProcess`.
-
-> :bulb: `Process.UnboundedProcess` is a type alias for a process with all the redirection capabilities
-
-Let's see an example of simply piping:
-
-```scala mdoc:silent
-val group1 = Process("grep", List("ERROR")) | Process("sort")
-val group2 = group1 | Process("uniq", List("-c"))
-```
-
-A custom pipe (when using `via`) can be anything of the type `Pipe[F, Byte, Byte]`. The
-following not very useful example capitalizes each word coming through:
-
-```scala mdoc:silent
-val customPipe: fs2.Pipe[IO, Byte, Byte] =
- (s: fs2.Stream[IO, Byte]) => s
- .through(fs2.text.utf8.decode) // decode UTF-8
- .through(fs2.text.lines) // split to lines
- .map(_.split(' ').toVector) // split lines to words
- .map(v => v.map(_.capitalize).mkString(" "))
- .intersperse("\n") // remerge lines
- .through(fs2.text.utf8.encode) // encode as UTF-8
-
-val group3 = Process("echo", List("hello world")).via(customPipe).to(Process("wc", List("-w")))
-```
\ No newline at end of file
diff --git a/docs/docs/docs/fs2/redirection.md b/docs/docs/docs/fs2/redirection.md
deleted file mode 100644
index c8093e97..00000000
--- a/docs/docs/docs/fs2/redirection.md
+++ /dev/null
@@ -1,176 +0,0 @@
----
-layout: docs
-title: Redirection
----
-
-# Redirecting input, output and error
-
-```scala mdoc:invisible
-import cats.effect._
-import scala.concurrent.ExecutionContext
-import io.github.vigoo.prox._
-
-val prox = ProxFS2[IO]
-import prox._
-```
-
-Similarly to [customization](customize), redirection is also implemented with _capability traits_.
-The `ProcessIO` type returned by the `Process` constructor implements all the three redirection capability
-traits:
-
-- `RedirectableInput` marks that the standard input of the process is not bound yet
-- `RedirectableOutput` marks that the standard output of the process is not bound yet
-- `RedirectableError` marks that the standard error output of the process is not bound yet
-
-Each of the three channels can be **only redirected once**. The result type of each redirection method no longer
-implements the given capability.
-
-Let's see an example of this (redirection methods are described below on this page):
-
-```scala mdoc
-import cats.implicits._
-
-val proc1 = Process("echo", List("Hello world"))
-val proc2 = proc1 ># fs2.text.utf8.decode
-```
-
-It is no longer possible to redirect the output of `proc2`:
-
-```scala mdoc:fail
-val proc3 = proc2 >? fs2.text.utf8.decode[IO].andThen(fs2.text.lines)
-```
-
-Many redirection methods have an _operator_ version but all of them have alphanumberic
-variants as well.
-
-### Input redirection
-Input redirection is enabled by the `RedirectableInput` trait. The following operations
-are supported:
-
-| operator | alternative | parameter type | what it does |
-|----------|--------------|----------------------|---------------|
-| `<` | `fromFile` | `java.nio.file.Path` | Natively attach a source file to STDIN |
-| `<` | `fromStream` | `Stream[F, Byte]` | Attach an _fs2 byte stream_ to STDIN |
-| `!<` | `fromStream` | `Stream[F, Byte]` | Attach an _fs2 byte stream_ to STDIN and **flush** after each chunk |
-
-### Output redirection
-Output redirection is enabled by the `RedirectableOutput` trait.
-The following operations are supported:
-
-| operator | alternative | parameter type | result type | what it does |
-|----------|----------------|--------------------------------|-------------| --------------|
-| `>` | `toFile` | `java.nio.file.Path` | `Unit` | Natively attach STDOUT to a file |
-| `>>` | `appendToFile` | `java.nio.file.Path` | `Unit` | Natively attach STDOUT to a file in append mode |
-| `>` | `toSink` | `Pipe[F, Byte, Unit]` | `Unit` | Drains the STDOUT through the given pipe |
-| `>#` | `toFoldMonoid` | `[O: Monoid](Pipe[F, Byte, O]` | `O` | Sends STDOUT through the pipe and folds the result using its _monoid_ instance
-| `>?` | `toVector` | `Pipe[F, Byte, O]` | `Vector[O]` | Sends STDOUT through the pipe and collects the results |
-| | `drainOutput` | `Pipe[F, Byte, O]` | `Unit` | Drains the STDOUT through the given pipe |
-| | `foldOutput` | `Pipe[F, Byte, O], R, (R, O) => R` | `R` | Sends STDOUT through the pipe and folds the result using a custom fold function |
-
-### Error redirection
-Error redirection is enabled by the `RedirectableError` trait.
-The following operations are supported:
-
-| operator | alternative | parameter type | result type | what it does |
-|-----------|---------------------|--------------------------------|-------------| --------------|
-| `!>` | `errorToFile` | `java.nio.file.Path` | `Unit` | Natively attach STDERR to a file |
-| `!>>` | `appendErrorToFile` | `java.nio.file.Path` | `Unit` | Natively attach STDERR to a file in append mode |
-| `!>` | `errorToSink` | `Pipe[F, Byte, Unit]` | `Unit` | Drains the STDERR through the given pipe |
-| `!>#` | `errorToFoldMonoid` | `[O: Monoid](Pipe[F, Byte, O]` | `O` | Sends STDERR through the pipe and folds the result using its _monoid_ instance
-| `!>?` | `errorToVector` | `Pipe[F, Byte, O]` | `Vector[O]` | Sends STDERR through the pipe and collects the results |
-| | `drainError` | `Pipe[F, Byte, O]` | `Unit` | Drains the STDERR through the given pipe |
-| | `foldError` | `Pipe[F, Byte, O], R, (R, O) => R` | `R` | Sends STDERR through the pipe and folds the result using a custom fold function |
-
-### Redirection for process groups
-[Process groups](processgroups) are two or more processes attached together through pipes.
-This connection is internally implemented using the above described redirection capabilities.
-This means that all but the first process has their _inputs_ bound, and all but the last one has
-their _outputs_ bound. Redirection of input and output for a _process group_ is thus a well defined
-operation meaning redirection of input of the _first_ process and redirection of output of the _last process_.
-
-For this reason the class created via _process piping_ implements the `RedirectableInput` and
-`RedirectableOutput` traits described above.
-
-For the sake of simplicity the library does not support anymore the fully customizable
-per-process error redirection for process groups, but a reduced but still quite expressive
-version described by the `RedirectableErrors` trait.
-
-The methods in this trait define error redirection for **all process in the group at once**:
-
-| operator | alternative | parameter type | result type | what it does |
-|-----------|----------------------|--------------------------------|-------------| --------------|
-| `!>` | `errorsToSink` | `Pipe[F, Byte, Unit]` | `Unit` | Drains the STDERR through the given pipe |
-| `!>#` | `errorsToFoldMonoid` | `[O: Monoid](Pipe[F, Byte, O]` | `O` | Sends STDERR through the pipe and folds the result using its _monoid_ instance
-| `!>?` | `errorsToVector` | `Pipe[F, Byte, O]` | `Vector[O]` | Sends STDERR through the pipe and collects the results |
-| | `drainErrors` | `Pipe[F, Byte, O]` | `Unit` | Drains the STDERR through the given pipe |
-| | `foldErrors` | `Pipe[F, Byte, O], R, (R, O) => R` | `R` | Sends STDERR through the pipe and folds the result using a custom fold function |
-
-Redirection to file is not possible through this interface as only a single path could be
-provided.
-The result of these redirections is accessible through the `ProcessGroupResult` interface as
-it is described in the [running processes section](running).
-
-By using the `RedirectableErrors.customizedPerProcess` interface (having the type
-`RedirectableErrors.CustomizedPerProcess`) it is possible to customize the redirection
-targets per process while keeping their types uniform:
-
-| operator | alternative | parameter type | result type | what it does |
-|-----------|----------------------|-----------------------------------------------|-------------| --------------|
-| | `errorsToFile` | `Process => java.nio.file.Path` | `Unit` | Natively attach STDERR to a file |
-| | `appendErrorsToFile` | `Process => java.nio.file.Path` | `Unit` | Natively attach STDERR to a file in append mode |
-| | `errorsToSink` | `Process => Pipe[F, Byte, Unit]` | `Unit` | Drains the STDERR through the given pipe |
-| | `errorsToFoldMonoid` | `Process => [O: Monoid](Pipe[F, Byte, O]` | `O` | Sends STDERR through the pipe and folds the result using its _monoid_ instance
-| | `errorsToVector` | `Process => Pipe[F, Byte, O]` | `Vector[O]` | Sends STDERR through the pipe and collects the results |
-| | `drainErrors` | `Process => Pipe[F, Byte, O]` | `Unit` | Drains the STDERR through the given pipe |
-| | `foldErrors` | `Process => Pipe[F, Byte, O], R, (R, O) => R` | `R` | Sends STDERR through the pipe and folds the result using a custom fold function |
-
-Let's see an example of how this works!
-
-First we define a queue where we want to send _error lines_ from all the involved
-processes, then we define the two processes separately, connect them with a pipe and
-customize the error redirection where we prefix the parsed lines based on which
-process they came from:
-
-
-```scala mdoc:silent
-import cats.effect.std.Queue
-
-for {
- errors <- Queue.unbounded[IO, String]
- parseLines = fs2.text.utf8.decode[IO].andThen(fs2.text.lines)
-
- p1 = Process("proc1")
- p2 = Process("proc2")
- group = (p1 | p2).customizedPerProcess.errorsToSink {
- case p if p == p1 => parseLines.andThen(_.map(s => "P1: " + s)).andThen(_.evalMap(errors.offer))
- case p if p == p2 => parseLines.andThen(_.map(s => "P2: " + s)).andThen(_.evalMap(errors.offer))
- }
-} yield ()
-```
-
-### Creating reusable functions
-The `Process` object contains several useful _type aliases_ for writing functions that work with any process by
-only specifying what redirection channels we want _unbounded_.
-
-The `UnboundProcess` represents a process which is fully unbound, no redirection has been done yet. It is
-defined as follows:
-
-```scala
-type UnboundProcess = Process[Unit, Unit]
- with RedirectableInput[UnboundOEProcess]
- with RedirectableOutput[UnboundIEProcess[*]]
- with RedirectableError[UnboundIOProcess[*]]
-```
-
-where `UnboundIOProcess[E]` for example represents a process which has its _error output_ already bound.
-
-These type aliases can be used to define functions performing redirection on arbitrary processes, for example:
-
-```scala mdoc
-def logErrors[P <: Process.UnboundEProcess[_]](proc: P) = {
- val target = fs2.text.utf8.decode[IO].andThen(fs2.text.lines).andThen(_.evalMap(line => IO(println(line))))
- proc !> target
-}
-
-val proc4 = logErrors(Process("something"))
-```
\ No newline at end of file
diff --git a/docs/docs/docs/fs2/running.md b/docs/docs/docs/fs2/running.md
deleted file mode 100644
index 083d336b..00000000
--- a/docs/docs/docs/fs2/running.md
+++ /dev/null
@@ -1,69 +0,0 @@
----
-layout: docs
-title: Running processes
----
-
-# Running processes and process groups
-```scala mdoc:invisible
-import io.github.vigoo.prox.zstream._
-```
-
-There are three methods for running a _process_:
-
-- The `run` method is the simplest one, it starts the process and then blocks the current fiber until it terminates
-- The `start` method starts the process and returns a fiber packed into a resource. The fiber finishes when the process terminates. Canceling the fiber terminates the process.
-- The `startProcess` method returns a `RunningProcess[O, E]` interface that allows advanced some operations
-
-Similarly for a _process group_, there is a `run`, a `start` and a `startProcessGroup` method but with different result types.
-
-Let's see some examples!
-
-```scala mdoc:silent
-implicit val runner: ProcessRunner[JVMProcessInfo] = new JVMProcessRunner
-
-val process = Process("echo", List("hello"))
-
-val result1 = process.run()
-val result2 = process.start().flatMap { fiber =>
- fiber.join
-}
-
-val result3 =
- for {
- runningProcess <- process.startProcess()
- _ <- runningProcess.kill()
- } yield ()
-```
-
-Both `RunningProcess` and `RunningProcessGroup` has the following methods:
-- `waitForExit()` waits until the process terminates
-- `terminate()` sends `SIGTERM` to the process
-- `kill()` sends `SIGKILL` to the process
-
-In addition `RunningProcess` also defines an `isAlive` check.
-
-### Process execution result
-The result of a process is represented by `ProcessResult[O, E]` defined as follows:
-
-```scala
-trait ProcessResult[+O, +E] {
- val exitCode: ExitCode
- val output: O
- val error: E
-}
-```
-
-The type and value of `output` and `error` depends on what [redirection was defined](redirection) on the process.
-
-### Process group execution result
-The result of a process group is represented by `ProcessGroupResult[O, E]`:
-
-```scala
-trait ProcessGroupResult[+O, +E] {
- val exitCodes: Map[Process[Unit, Unit], ExitCode]
- val output: O
- val errors: Map[Process[Unit, Unit], E]
-}
-```
-
-The keys of the maps are the original _process_ values used in the piping operations.
\ No newline at end of file
diff --git a/docs/docs/docs/index.md b/docs/docs/docs/index.md
deleted file mode 100644
index 40b6a585..00000000
--- a/docs/docs/docs/index.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-layout: docs
-title: Getting started
----
-
-Prox has two different interfaces:
-- [Cats Effect with FS2](fs2/index)
-- [ZIO with ZStream](zstream/index)
-
diff --git a/docs/docs/docs/migration.md b/docs/docs/docs/migration.md
deleted file mode 100644
index b6244296..00000000
--- a/docs/docs/docs/migration.md
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: docs
-title: Migration
----
-# Migration
-
-### from 0.1.x to 0.2
-
-- The `start` method on processes now requires a `blockingExecutionContext` argument
-- `Ignore` has been renamed to `Drain`
-- `Log` has been renamed to `ToVector`
-
-### from 0.2 to 0.4
-
-- `Process` now takes the effect type as parameter, so in case of cats-effect, `Process(...)` becomes `Process[IO](...)`
-- The `start` method on processes now gets a `Blocker` instead of an execution context
-
-### from 0.4 to 0.5
-
-0.5 is a complete rewrite of the original library, and the API changed a lot, especially
-if the process types were used in code to pass around / wrap them. Please refer to the other
-sections of the documentation to learn how to reimplement them. For simple use cases where
-constructing and running the processes directly the main differences are:
-
-- Different operators / methods for different source and target types, see [the page about redirection](redirection)
-- The need of an implicit [process runner](running) in scope
-- New ways to start and wait for the process, see [the page about runnning processes](running)
-
-### from 0.5 to 0.6
-
-0.6 introduces the native ZIO/ZStream version of the library. For existing code the following differences apply:
-
-- Instead of `prox`, the artifact is now called `prox-fs2`
-- Instead of _global imports_, the FS2 prox module now has to be constructed with the `FS2` constructor and the API is imported from that
-- Because the `FS2` module captures the `F[_]` and the `Blocker`, they are no longer needed to pass on to the API functions and types
diff --git a/docs/docs/docs/zstream/custom-runners.md b/docs/docs/docs/zstream/custom-runners.md
deleted file mode 100644
index a5a975e8..00000000
--- a/docs/docs/docs/zstream/custom-runners.md
+++ /dev/null
@@ -1,80 +0,0 @@
----
-layout: docs
-title: Custom runners
----
-
-# Customizing the runner
-
-```scala mdoc:invisible
-import zio._
-import zio.stream._
-import io.github.vigoo.prox._
-import io.github.vigoo.prox.zstream._
-```
-
-The _runner_ is responsible for starting the native processes and wiring all the redirections together. The default
-implementation is called `JVMProcessRunner`.
-
-There are use cases when providing a custom runner makes sense. One such use case could be to launch external processes
-within a docker container in case of running on a development machine (for example from tests), while running them directly
-in production, when the whole service is running within the container.
-
-We can implement this scenario by using `JVMProcessRunner` in production and a custom `DockerizedProcessRunner` in tests,
-where we define the latter as follows:
-
-```scala mdoc
-import java.nio.file.Path
-import java.util.UUID
-
-case class DockerImage(name: String)
-
-case class DockerContainer(name: String)
-
-case class DockerProcessInfo[DockerProcessInfo](container: DockerContainer, dockerProcessInfo: DockerProcessInfo)
-
-class DockerizedProcessRunner[Info](processRunner: ProcessRunner[Info],
- mountedDirectory: Path,
- workingDirectory: Path,
- image: DockerImage)
- extends ProcessRunner[DockerProcessInfo[Info]] {
-
- override def startProcess[O, E](process: Process[O, E]): ZIO[Any, ProxError, RunningProcess[O, E, DockerProcessInfo[Info]]] = {
- for {
- container <- generateContainerName
- runningProcess <- processRunner
- .startProcess(wrapInDocker(process, container))
- } yield runningProcess.mapInfo(info => DockerProcessInfo(container, info))
- }
-
- override def startProcessGroup[O, E](processGroup: ProcessGroup[O, E]): ZIO[Any, ProxError, RunningProcessGroup[O, E, DockerProcessInfo[Info]]] = {
- ZIO.foreach(processGroup.originalProcesses.toVector)(key => generateContainerName.map(c => key -> c)).flatMap { keyAndNames =>
- val nameMap = keyAndNames.toMap
- val names = keyAndNames.map(_._2)
- val modifiedProcessGroup = processGroup.map(new ProcessGroup.Mapper[O, E] {
- def mapFirst[P <: Process[ZStream[Any, ProxError, Byte], E]](process: P): P = wrapInDocker(process, names.head).asInstanceOf[P]
- def mapInnerWithIdx[P <: Process.UnboundIProcess[ZStream[Any, ProxError, Byte], E]](process: P, idx: Int): P =
- wrapInDocker(process, names(idx)).asInstanceOf[P]
- def mapLast[P <: Process.UnboundIProcess[O, E]](process: P): P = wrapInDocker(process, names.last).asInstanceOf[P]
- })
- processRunner.startProcessGroup(modifiedProcessGroup)
- .map(_.mapInfo { case (key, info) => DockerProcessInfo(nameMap(key), info) })
- }
- }
-
- private def generateContainerName: ZIO[Any, ProxError, DockerContainer] =
- ZIO.attempt(DockerContainer(UUID.randomUUID().toString)).mapError(UnknownProxError)
-
- private def wrapInDocker[O, E](process: Process[O, E], container: DockerContainer): Process[O, E] = {
- val envVars = process.environmentVariables.flatMap { case (key, value) => List("-e", s"$key=$value") }.toList
- process.withCommand("docker").withArguments(
- "run" ::
- "--name" :: container.name ::
- "-v" :: mountedDirectory.toString ::
- "-w" :: workingDirectory.toString ::
- envVars :::
- List(image.name, process.command) :::
- process.arguments
- )
- }
-}
-```
diff --git a/docs/docs/docs/zstream/customize.md b/docs/docs/docs/zstream/customize.md
deleted file mode 100644
index 53b97a47..00000000
--- a/docs/docs/docs/zstream/customize.md
+++ /dev/null
@@ -1,69 +0,0 @@
----
-layout: docs
-title: Customizing environment
----
-
-# Customizing the environment
-
-```scala mdoc:invisible
-import io.github.vigoo.prox.zstream._
-import io.github.vigoo.prox._
-```
-
-The type returned by the `Process` constructor also implements the `ProcessConfiguration` trait,
-adding three methods that can be used to customize the working environment of the process to be started:
-
-### Working directory
-
-The `in` method can be used to customize the working directory:
-
-```scala mdoc
-import io.github.vigoo.prox.path._
-
-val dir = home / "tmp"
-val proc1 = Process("ls") in dir
-```
-
-Note that `dir` has the type `java.nio.file.Path`, and the `home / tmp` syntax is just a thin
-syntax extension to produce such values.
-
-### Adding environment variables
-
-The `with` method can be used to add environment variables to the process in the following
-way:
-
-```scala mdoc
-val proc2 = Process("echo", List("$TEST")) `with` ("TEST" -> "Hello world")
-```
-
-### Removing environment variables
-
-The subprocess inherits the parent process environment, so it may be necessary to
-_remove_ some already defined environment variables with the `without` method:
-
-```scala mdoc
-val proc3 = Process("echo" , List("$PATH")) `without` "PATH"
-```
-
-### Writing reusable functions
-
-Because these methods are part of the `ProcessConfiguration` _capability_, writing reusable functions requires us to define
-a polymorphic function that requires this capability:
-
-```scala mdoc
-import java.nio.file.Path
-
-def withHome[P <: ProcessLike with ProcessLikeConfiguration](home: Path, proc: P): P#Self =
- proc `with` ("HOME" -> home.toString)
-```
-
-Then we can use it on any kind of process or process group (read about [redirection](redirection) to understand
-why there are multiple concrete process types):
-
-```scala mdoc
-val proc4 = Process("echo", List("$HOME"))
-val proc5 = withHome(home, proc4)
-
-val group1 = Process("grep", List("ERROR")) | Process("sort")
-val group2 = withHome(home, group1)
-```
\ No newline at end of file
diff --git a/docs/docs/docs/zstream/index.md b/docs/docs/docs/zstream/index.md
deleted file mode 100644
index 27e504e8..00000000
--- a/docs/docs/docs/zstream/index.md
+++ /dev/null
@@ -1,80 +0,0 @@
----
-layout: docs
-title: Getting started
----
-
-# Getting started with prox
-
-First add one of the `prox` interfaces as a dependency:
-
-```sbt
-libraryDependencies += "io.github.vigoo" %% "prox-zstream" % "0.7.3"
-```
-
-and import the ZIO specific API from:
-
-```scala mdoc
-import io.github.vigoo.prox._
-import io.github.vigoo.prox.zstream._
-```
-
-There is also an experimental version for ZIO 2, based on its snapshot releases:
-
-```sbt
-libraryDependencies += "io.github.vigoo" %% "prox-zstream-2" % "0.7.3"
-```
-
-The code snippets in the documentation are based on the ZIO 1 version.
-
-### Defining a process to run
-In prox a process to be executed is defined by a pure value which implements the `Process[O, E]` trait.
-The type parameters have the following meaning:
-
-- `O` is the type of the output value after the system process has finished running
-- `E` is the type of the error output value after the system process has finished running
-
-To create a simple process to be executed use the `Process` constructor:
-
-```scala mdoc
-val proc1 = Process("ls", List("-hal"))
-```
-
-or we can use the _string interpolator_:
-
-```scala mdoc
-val proc2 = proc"ls -hal"
-```
-
-Then we can
-- [customize the process execution](customize) by for example setting environment variables and working directory
-- and [redirect the input, output and error](redirection) channels of the process
-- [pipe two or more processes together](processgroups)
-
-still staying on purely specification level.
-
-### Running the process
-
-Once we have our process specification ready, we can _start_ the process with one of the
-IO functions on process.
-
-But for this we first have to have a `ProcessRunner` implementation in scope. The default
-one is called `JVMProcessRunner` and it can be created in the following way:
-
-```scala mdoc:silent
-implicit val runner: ProcessRunner[JVMProcessInfo] = new JVMProcessRunner
-```
-
-Read the [custom process runners](custom-runners) page for an example of using a customized runner.
-
-With the runner in place we can use [several methods to start the process](running).
-The simplest one is called `run` and it blocks the active thread until the process finishes
-running:
-
-```scala mdoc
-proc1.run()
-```
-
-The result of this IO action is a `ProcessResult[O, E]`, with the ability to observe the
-_exit code_ and the redirected output and error values. In our first example both `O` and
-`E` were `Unit` because the default is to redirect output and error to the _standard output_ and
-_standard error_ streams.
diff --git a/docs/docs/docs/zstream/processgroups.md b/docs/docs/docs/zstream/processgroups.md
deleted file mode 100644
index 5470bbe2..00000000
--- a/docs/docs/docs/zstream/processgroups.md
+++ /dev/null
@@ -1,52 +0,0 @@
----
-layout: docs
-title: Process groups
----
-
-# Connecting processes together via pipes
-```scala mdoc:invisible
-import zio._
-import zio.stream._
-import zio.prelude._
-import io.github.vigoo.prox._
-import io.github.vigoo.prox.zstream._
-import java.nio.charset.StandardCharsets
-```
-
-Connecting one process to another means that the standard output of the first process
-gets redirected to the standard input of the second process. This is implemented using
-the redirection capabilities described [on the redirection page](redirection). The result
-of connecting one process to another is called a _process group_ and it implements the
-trait `ProcessGroup[O, E]`.
-
-To create a process group, either:
-- Use the `|` or `via` methods between two **unbounded** processes
-- Use the `|` or `via` methods between an **unbounded** process group and an **unbounded** process
-
-It is important that the process group construction must always happen before any redirection,
-the type system enforces this by requiring the involved processes to be `UnboundedProcess`.
-
-> :bulb: `Process.UnboundedProcess` is a type alias for a process with all the redirection capabilities
-
-Let's see an example of simple piping:
-
-```scala mdoc:silent
-val group1 = Process("grep", List("ERROR")) | Process("sort")
-val group2 = group1 | Process("uniq", List("-c"))
-```
-
-A custom pipe (when using `via`) can be anything of the type `ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, Byte]`.
-The following not very useful example capitalizes each word coming through:
-
-```scala mdoc:silent
-val customPipe: ProxPipe[Byte, Byte] =
- (s: ZStream[Any, ProxError, Byte]) => s
- .via(ZPipeline.utf8Decode.mapError(UnknownProxError.apply)) // decode UTF-8
- .via(ZPipeline.splitLines) // split to lines
- .map(_.split(' ').toVector) // split lines to words
- .map(v => v.map(_.capitalize).mkString(" "))
- .intersperse("\n") // remerge lines
- .flatMap(str => ZStream.fromIterable(str.getBytes(StandardCharsets.UTF_8))) // reencode
-
-val group3 = Process("echo", List("hello world")).via(customPipe).to(Process("wc", List("-w")))
-```
\ No newline at end of file
diff --git a/docs/docs/docs/zstream/redirection.md b/docs/docs/docs/zstream/redirection.md
deleted file mode 100644
index b1f04bfd..00000000
--- a/docs/docs/docs/zstream/redirection.md
+++ /dev/null
@@ -1,185 +0,0 @@
----
-layout: docs
-title: Redirection
----
-
-# Redirecting input, output and error
-
-```scala mdoc:invisible
-import io.github.vigoo.prox._
-import io.github.vigoo.prox.zstream._
-```
-
-Similarly to [customization](customize), redirection is also implemented with _capability traits_.
-The `ProcessIO` type returned by the `Process` constructor implements all the three redirection capability
-traits:
-
-- `RedirectableInput` marks that the standard input of the process is not bound yet
-- `RedirectableOutput` marks that the standard output of the process is not bound yet
-- `RedirectableError` marks that the standard error output of the process is not bound yet
-
-Each of the three channels can be **only redirected once**. The result type of each redirection method no longer
-implements the given capability.
-
-Let's see an example of this (redirection methods are described below on this page):
-
-```scala mdoc
-import zio._
-import zio.stream._
-import zio.prelude._
-
-val proc1 = Process("echo", List("Hello world"))
-val proc2 = proc1 ># ZPipeline.utf8Decode
-```
-
-It is no longer possible to redirect the output of `proc2`:
-
-```scala mdoc:fail
-val proc3 = proc2 >? (ZPipeline.utf8Decode >>> ZPipeline.splitLines)
-```
-
-Many redirection methods have an _operator_ version but all of them have alphanumeric
-variants as well.
-
-### Input redirection
-Input redirection is enabled by the `RedirectableInput` trait. The following operations
-are supported:
-
-| operator | alternative | parameter type | what it does |
-|----------|--------------|---------------------------------|---------------|
-| `<` | `fromFile` | `java.nio.file.Path` | Natively attach a source file to STDIN |
-| `<` | `fromStream` | `ZStream[Any, ProxError, Byte]` | Attach a _ZIO byte stream_ to STDIN |
-| `!<` | `fromStream` | `ZStream[Any, ProxError, Byte]` | Attach a _ZIO byte stream_ to STDIN and **flush** after each chunk |
-
-### Output redirection
-Output redirection is enabled by the `RedirectableOutput` trait.
-The following operations are supported:
-
-| operator | alternative | parameter type | result type | what it does |
-|----------|----------------|--------------------------------------------------------------------------------|-------------| --------------|
-| `>` | `toFile` | `java.nio.file.Path` | `Unit` | Natively attach STDOUT to a file |
-| `>>` | `appendToFile` | `java.nio.file.Path` | `Unit` | Natively attach STDOUT to a file in append mode |
-| `>` | `toSink` | `TransformAndSink[Byte, _]` | `Unit` | Drains the STDOUT through the given sink |
-| `>#` | `toFoldMonoid` | `[O: Identity](ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])` | `O` | Sends STDOUT through the stream and folds the result using its _monoid_ instance
-| `>?` | `toVector` | `ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])` | `Vector[O]` | Sends STDOUT through the stream and collects the results |
-| | `drainOutput` | `ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])` | `Unit` | Drains the STDOUT through the given stream |
-| | `foldOutput` | `ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]), R, (R, O) => R` | `R` | Sends STDOUT through the stream and folds the result using a custom fold function |
-
-All the variants that accept a _stream transformation_ (`ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])`) are also usable by directly passing
-a `ZPipeline`.
-
-`TransformAndSink` encapsulates a _stream transformation_ and a _unit sink_. It is possible to use a sink directly if transformation is not needed.
-
-```scala
-case class TransformAndSink[A, B](transform: ZStream[Any, ProxError, A] => ZStream[Any, ProxError, B],
- sink: ZSink[Any, ProxError, B, Any, Unit])
-```
-
-### Error redirection
-Error redirection is enabled by the `RedirectableError` trait.
-The following operations are supported:
-
-| operator | alternative | parameter type | result type | what it does |
-|-----------|---------------------|--------------------------------------------------------------------------------|-------------| --------------|
-| `!>` | `errorToFile` | `java.nio.file.Path` | `Unit` | Natively attach STDERR to a file |
-| `!>>` | `appendErrorToFile` | `java.nio.file.Path` | `Unit` | Natively attach STDERR to a file in append mode |
-| `!>` | `errorToSink` | `TransformAndSink[Byte, _]` | `Unit` | Drains the STDERR through the given sink |
-| `!>#` | `errorToFoldMonoid` | `[O: Monoid](ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])` | `O` | Sends STDERR through the pipe and folds the result using its _monoid_ instance
-| `!>?` | `errorToVector` | `ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])` | `Vector[O]` | Sends STDERR through the pipe and collects the results |
-| | `drainError` | `ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])` | `Unit` | Drains the STDERR through the given pipe |
-| | `foldError` | `ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]), R, (R, O) => R` | `R` | Sends STDERR through the pipe and folds the result using a custom fold function |
-
-### Redirection for process groups
-[Process groups](processgroups) are two or more processes attached together through pipes.
-This connection is internally implemented using the above described redirection capabilities.
-This means that all but the first process has their _inputs_ bound, and all but the last one has
-their _outputs_ bound. Redirection of input and output for a _process group_ is thus a well defined
-operation meaning redirection of input of the _first_ process and redirection of output of the _last process_.
-
-For this reason the class created via _process piping_ implements the `RedirectableInput` and
-`RedirectableOutput` traits described above.
-
-For the sake of simplicity the library no longer supports the fully customizable
-per-process error redirection for process groups, offering instead a reduced but still
-quite expressive version described by the `RedirectableErrors` trait.
-
-The methods in this trait define error redirection for **all processes in the group at once**:
-
-| operator | alternative | parameter type | result type | what it does |
-|-----------|----------------------|--------------------------------------------------------------------------------|-------------| --------------|
-| `!>` | `errorsToSink` | `TransformAndSink[Byte, _]` | `Unit` | Drains the STDERR through the given sink |
-| `!>#` | `errorsToFoldMonoid` | `[O: Monoid](ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])` | `O` | Sends STDERR through the stream and folds the result using its _monoid_ instance
-| `!>?` | `errorsToVector` | `ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])` | `Vector[O]` | Sends STDERR through the stream and collects the results |
-| | `drainErrors` | `ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])` | `Unit` | Drains the STDERR through the given stream |
-| | `foldErrors` | `ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]), R, (R, O) => R` | `R` | Sends STDERR through the stream and folds the result using a custom fold function |
-
-Redirection to file is not possible through this interface as only a single path could be
-provided.
-The result of these redirections is accessible through the `ProcessGroupResult` interface as
-it is described in the [running processes section](running).
-
-By using the `RedirectableErrors.customizedPerProcess` interface (having the type
-`RedirectableErrors.CustomizedPerProcess`) it is possible to customize the redirection
-targets per process while keeping their types uniform:
-
-| operator | alternative | parameter type | result type | what it does |
-|-----------|----------------------|-------------------------------------------------------------------------------------------|-------------| --------------|
-| | `errorsToFile` | `Process => java.nio.file.Path` | `Unit` | Natively attach STDERR to a file |
-| | `appendErrorsToFile` | `Process => java.nio.file.Path` | `Unit` | Natively attach STDERR to a file in append mode |
-| | `errorsToSink` | `Process => TransformAndSink[Byte, _]` | `Unit` | Drains the STDERR through the given sink |
-| | `errorsToFoldMonoid` | `Process => [O: Monoid](ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])` | `O` | Sends STDERR through the stream and folds the result using its _monoid_ instance
-| | `errorsToVector` | `Process => ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])` | `Vector[O]` | Sends STDERR through the stream and collects the results |
-| | `drainErrors` | `Process => ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])` | `Unit` | Drains the STDERR through the given stream |
-| | `foldErrors` | `Process => ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]), R, (R, O) => R` | `R` | Sends STDERR through the stream and folds the result using a custom fold function |
-
-Let's see an example of how this works!
-
-First we define a queue where we want to send _error lines_ from all the involved
-processes, then we define the two processes separately, connect them with a pipe and
-customize the error redirection where we prefix the parsed lines based on which
-process they came from:
-
-
-```scala mdoc:silent
-
-for {
- errors <- Queue.unbounded[String]
- parseLines = (s: ZStream[Any, ProxError, Byte]) => s.via(ZPipeline.utf8Decode.mapError(UnknownProxError.apply) >>> ZPipeline.splitLines)
-
- p1 = Process("proc1")
- p2 = Process("proc2")
- group = (p1 | p2).customizedPerProcess.errorsToSink {
- case p if p == p1 => TransformAndSink(parseLines.andThen(_.map(s => "P1: " + s)), ZSink.foreach(errors.offer))
- case p if p == p2 => TransformAndSink(parseLines.andThen(_.map(s => "P2: " + s)), ZSink.foreach(errors.offer))
- }
-} yield ()
-```
-
-### Creating reusable functions
-The `Process` object contains several useful _type aliases_ for writing functions that work with any process by
-only specifying what redirection channels we want _unbounded_.
-
-The `UnboundProcess` represents a process which is fully unbound, no redirection has been done yet. It is
-defined as follows:
-
-```scala
-type UnboundProcess = Process[Unit, Unit]
- with RedirectableInput[UnboundOEProcess]
- with RedirectableOutput[UnboundIEProcess[*]]
- with RedirectableError[UnboundIOProcess[*]]
-```
-
-where `UnboundIOProcess[E]` for example represents a process which has its _error output_ already bound.
-
-These type aliases can be used to define functions performing redirection on arbitrary processes, for example:
-
-```scala mdoc
-def logErrors[P <: Process.UnboundEProcess[_]](proc: P) = {
- val target = TransformAndSink(
- ZPipeline.utf8Decode.mapError(UnknownProxError.apply) >>> ZPipeline.splitLines,
- ZSink.foreach((line: String) => ZIO.debug(line)))
- proc !> target
-}
-
-val proc4 = logErrors(Process("something"))
-```
\ No newline at end of file
diff --git a/docs/docs/docs/zstream/running.md b/docs/docs/docs/zstream/running.md
deleted file mode 100644
index a2aa7a78..00000000
--- a/docs/docs/docs/zstream/running.md
+++ /dev/null
@@ -1,73 +0,0 @@
----
-layout: docs
-title: Running processes
----
-
-# Running processes and process groups
-```scala mdoc:invisible
-import zio._
-import io.github.vigoo.prox._
-import io.github.vigoo.prox.zstream._
-```
-
-There are three methods for running a _process_:
-
-- The `run` method is the simplest one, it starts the process and then blocks the current fiber until it terminates
-- The `start` method starts the process and returns a fiber packed into a resource. The fiber finishes when the process terminates. Canceling the fiber terminates the process.
-- The `startProcess` method returns a `RunningProcess[O, E]` interface that allows some advanced operations
-
-Similarly for a _process group_, there is a `run`, a `start` and a `startProcessGroup` method but with different result types.
-
-Let's see some examples!
-
-```scala mdoc:silent
-implicit val runner: ProcessRunner[JVMProcessInfo] = new JVMProcessRunner
-
-val process = Process("echo", List("hello"))
-
-val result1 = process.run()
-val result2 = ZIO.scoped {
- process.start().flatMap { fiber =>
- fiber.join
- }
-}
-
-val result3 =
- for {
- runningProcess <- process.startProcess()
- _ <- runningProcess.kill()
- } yield ()
-```
-
-Both `RunningProcess` and `RunningProcessGroup` have the following methods:
-- `waitForExit()` waits until the process terminates
-- `terminate()` sends `SIGTERM` to the process
-- `kill()` sends `SIGKILL` to the process
-
-In addition `RunningProcess` also defines an `isAlive` check.
-
-### Process execution result
-The result of a process is represented by `ProcessResult[O, E]` defined as follows:
-
-```scala
-trait ProcessResult[+O, +E] {
- val exitCode: ExitCode
- val output: O
- val error: E
-}
-```
-
-The type and value of `output` and `error` depends on what [redirection was defined](redirection) on the process.
-
-### Process group execution result
-The result of a process group is represented by `ProcessGroupResult[O, E]`:
-
-```scala
-trait ProcessGroupResult[+O, +E] {
- val exitCodes: Map[Process[Unit, Unit], ExitCode]
- val output: O
- val errors: Map[Process[Unit, Unit], E]
-}
-```
-
-The keys of the maps are the original _process_ values used in the piping operations.
\ No newline at end of file
diff --git a/docs/docs/index.md b/docs/docs/index.md
deleted file mode 100644
index d62d8a44..00000000
--- a/docs/docs/index.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-layout: homeFeatures
-title: "prox: Home"
-features:
- - first: ["Type safe", "Define the execution of one or more system processes in a type safe way"]
- - second: ["Purely functional", "Compose the process execution as an IO effect"]
- - third: ["Streaming", "Redirect input, output and error to/from functional streams"]
----
-
-Prox is a Scala library for running system processes, plugging them to each other and redirecting them to streams.
diff --git a/docs/fs2/custom-runners.html b/docs/fs2/custom-runners.html
new file mode 100644
index 00000000..d8bd7104
--- /dev/null
+++ b/docs/fs2/custom-runners.html
@@ -0,0 +1,93 @@
+prox: Custom runners
The runner is responsible for starting the native processes and wiring all the redirections together. The default
+implementation is called JVMProcessRunner.
+
+
There are use cases when providing a custom runner makes sense. One such use case could be to launch external processes
+within a docker container in case of running on a development machine (for example from tests), while running them directly
+in production, when the whole service is running within the container.
+
+
We can implement this scenario by using JVMProcessRunner in production and a custom DockerizedProcessRunner in tests,
+where we define the latter as follows:
\ No newline at end of file
diff --git a/docs/fs2/customize.html b/docs/fs2/customize.html
new file mode 100644
index 00000000..e2a6b2f7
--- /dev/null
+++ b/docs/fs2/customize.html
@@ -0,0 +1,225 @@
+prox: Customizing environment
The type returned by the Process constructor also implements the ProcessConfiguration trait,
+adding three methods that can be used to customize the working environment of the process to be started:
+
+
Working directory
+
+
The in method can be used to customize the working directory:
The subprocess inherits the parent process environment, so it may be necessary to
+remove some already defined environment variables with the without method:
Because these methods are part of the ProcessConfiguration capability, writing reusable functions requires us to define
+a polymorphic function that requires this capability:
\ No newline at end of file
diff --git a/docs/fs2/index.html b/docs/fs2/index.html
new file mode 100644
index 00000000..ea96dbbb
--- /dev/null
+++ b/docs/fs2/index.html
@@ -0,0 +1,129 @@
+prox: Getting started
We require F to implement the Concurrent type class, and for that we have to have an implicit
+context shifter in scope (this should be already available in an application using cats-effect).
+
+
Defining a process to run
+
In prox a process to be executed is defined by a pure value which implements the Process[O, E] trait.
+The type parameters have the following meaning:
+
+
+
O is the type of the output value after the system process has finished running
+
E is the type of the error output value after the system process has finished running
+
+
+
To create a simple process to be executed use the Process constructor:
Once we have our process specification ready, we can start the process with one of the
+IO functions on process.
+
+
But for this we first have to have a ProcessRunner implementation in scope. The default
+one is called JVMProcessRunner and it can be created in the following way:
With the runner in place we can use several methods to start the process.
+The simplest one is called run and it blocks the active thread until the process finishes
+running:
The result of this IO action is a ProcessResult[O, E], with the ability to observe the
+exit code and the redirected output and error values. In our first example both O and
+E were Unit because the default is to redirect output and error to the standard output and
+standard error streams.
+
\ No newline at end of file
diff --git a/docs/fs2/processgroups.html b/docs/fs2/processgroups.html
new file mode 100644
index 00000000..7f24f218
--- /dev/null
+++ b/docs/fs2/processgroups.html
@@ -0,0 +1,95 @@
+prox: Process groups
Connecting one process to another means that the standard output of the first process
+gets redirected to the standard input of the second process. This is implemented using
+the redirection capabilities described on the redirection page. The result
+of connecting one process to another is called a process group and it implements the
+trait ProcessGroup[O, E].
+
+
To create a process group, either:
+
+
Use the | or via methods between two unbounded processes
+
Use the | or via methods between an unbounded process group and an unbounded process
+
+
+
It is important that the process group construction must always happen before any redirection,
+the type system enforces this by requiring the involved processes to be UnboundedProcess.
+
+
+
Process.UnboundedProcess is a type alias for a process with all the redirection capabilities
A custom pipe (when using via) can be anything of the type Pipe[F, Byte, Byte]. The
+following not very useful example capitalizes each word coming through:
+
+
valcustomPipe:fs2.Pipe[IO, Byte, Byte]=
+ (s:fs2.Stream[IO, Byte])=>s
+ .through(fs2.text.utf8.decode)// decode UTF-8
+ .through(fs2.text.lines)// split to lines
+ .map(_.split(' ').toVector)// split lines to words
+ .map(v=>v.map(_.capitalize).mkString(" "))
+ .intersperse("\n")// remerge lines
+ .through(fs2.text.utf8.encode)// encode as UTF-8
+
+valgroup3=Process("echo",List("hello world")).via(customPipe).to(Process("wc",List("-w")))
+
+
+
+
\ No newline at end of file
diff --git a/docs/fs2/redirection.html b/docs/fs2/redirection.html
new file mode 100644
index 00000000..cb890ed9
--- /dev/null
+++ b/docs/fs2/redirection.html
@@ -0,0 +1,456 @@
+prox: Redirection
Similarly to customization, redirection is also implemented with capability traits.
+The ProcessIO type returned by the Process constructor implements all the three redirection capability
+traits:
+
+
+
RedirectableInput marks that the standard input of the process is not bound yet
+
RedirectableOutput marks that the standard output of the process is not bound yet
+
RedirectableError marks that the standard error output of the process is not bound yet
+
+
+
Each of the three channels can be only redirected once. The result type of each redirection method no longer
+implements the given capability.
+
+
Let’s see an example of this (redirection methods are described below on this page):
It is no longer possible to redirect the output of proc2:
+
+
valproc3=proc2>?fs2.text.utf8.decode[IO].andThen(fs2.text.lines)
+// error: value >? is not a member of repl.MdocSession.MdocApp.prox.Process.ProcessImplO[String]
+// did you mean !>??
+// val proc3 = proc2 >? fs2.text.utf8.decode[IO].andThen(fs2.text.lines)
+// ^^^^^^^^
+
+
+
Many redirection methods have an operator version but all of them have alphanumberic
+variants as well.
+
+
Input redirection
+
Input redirection is enabled by the RedirectableInput trait. The following operations
+are supported:
+
+
+
+
+
operator
+
alternative
+
parameter type
+
what it does
+
+
+
+
+
<
+
fromFile
+
java.nio.file.Path
+
Natively attach a source file to STDIN
+
+
+
<
+
fromStream
+
Stream[F, Byte]
+
Attach an fs2 byte stream to STDIN
+
+
+
!<
+
fromStream
+
Stream[F, Byte]
+
Attach an fs2 byte stream to STDIN and flush after each chunk
+
+
+
+
+
Output redirection
+
Output redirection is enabled by the RedirectableOutput trait.
+The following operations are supported:
+
+
+
+
+
operator
+
alternative
+
parameter type
+
result type
+
what it does
+
+
+
+
+
>
+
toFile
+
java.nio.file.Path
+
Unit
+
Natively attach STDOUT to a file
+
+
+
>>
+
appendToFile
+
java.nio.file.Path
+
Unit
+
Natively attach STDOUT to a file in append mode
+
+
+
>
+
toSink
+
Pipe[F, Byte, Unit]
+
Unit
+
Drains the STDOUT through the given pipe
+
+
+
>#
+
toFoldMonoid
+
[O: Monoid](Pipe[F, Byte, O]
+
O
+
Sends STDOUT through the pipe and folds the result using its monoid instance
+
+
+
>?
+
toVector
+
Pipe[F, Byte, O]
+
Vector[O]
+
Sends STDOUT through the pipe and collects the results
+
+
+
+
drainOutput
+
Pipe[F, Byte, O]
+
Unit
+
Drains the STDOUT through the given pipe
+
+
+
+
foldOutput
+
Pipe[F, Byte, O], R, (R, O) => R
+
R
+
Sends STDOUT through the pipe and folds the result using a custom fold function
+
+
+
+
+
Error redirection
+
Error redirection is enabled by the RedirectableError trait.
+The following operations are supported:
+
+
+
+
+
operator
+
alternative
+
parameter type
+
result type
+
what it does
+
+
+
+
+
!>
+
errorToFile
+
java.nio.file.Path
+
Unit
+
Natively attach STDERR to a file
+
+
+
!>>
+
appendErrorToFile
+
java.nio.file.Path
+
Unit
+
Natively attach STDERR to a file in append mode
+
+
+
!>
+
errorToSink
+
Pipe[F, Byte, Unit]
+
Unit
+
Drains the STDERR through the given pipe
+
+
+
!>#
+
errorToFoldMonoid
+
[O: Monoid](Pipe[F, Byte, O]
+
O
+
Sends STDERR through the pipe and folds the result using its monoid instance
+
+
+
!>?
+
errorToVector
+
Pipe[F, Byte, O]
+
Vector[O]
+
Sends STDERR through the pipe and collects the results
+
+
+
+
drainError
+
Pipe[F, Byte, O]
+
Unit
+
Drains the STDERR through the given pipe
+
+
+
+
foldError
+
Pipe[F, Byte, O], R, (R, O) => R
+
R
+
Sends STDERR through the pipe and folds the result using a custom fold function
+
+
+
+
+
Redirection for process groups
+
Process groups are two or more processes attached together through pipes.
+This connection is internally implemented using the above described redirection capabilities.
+This means that all but the first process has their inputs bound, and all but the last one has
+their outputs bound. Redirection of input and output for a process group is thus a well defined
+operation meaning redirection of input of the first process and redirection of output of the last process.
+
+
For this reason the class created via process piping implements the RedirectableInput and
+RedirectableOutput traits described above.
+
+
For the sake of simplicity the library does not support anymore the fully customizable
+per-process error redirection for process groups, but a reduced but still quite expressive
+version described by the RedirectableErrors trait.
+
+
The methods in this trait define error redirection for all process in the group at once:
+
+
+
+
+
operator
+
alternative
+
parameter type
+
result type
+
what it does
+
+
+
+
+
!>
+
errorsToSink
+
Pipe[F, Byte, Unit]
+
Unit
+
Drains the STDERR through the given pipe
+
+
+
!>#
+
errorsToFoldMonoid
+
[O: Monoid](Pipe[F, Byte, O]
+
O
+
Sends STDERR through the pipe and folds the result using its monoid instance
+
+
+
!>?
+
errorsToVector
+
Pipe[F, Byte, O]
+
Vector[O]
+
Sends STDERR through the pipe and collects the results
+
+
+
+
drainErrors
+
Pipe[F, Byte, O]
+
Unit
+
Drains the STDERR through the given pipe
+
+
+
+
foldErrors
+
Pipe[F, Byte, O], R, (R, O) => R
+
R
+
Sends STDERR through the pipe and folds the result using a custom fold function
+
+
+
+
+
Redirection to file is not possible through this interface as only a single path could be
+provided.
+The result of these redirections is accessible through the ProcessGroupResult interface as
+it is described in the running processes section.
+
+
By using the RedirectableErrors.customizedPerProcess interface (having the type
+RedirectableErrors.CustomizedPerProcess) it is possible to customize the redirection
+targets per process while keeping their types uniform:
+
+
+
+
+
operator
+
alternative
+
parameter type
+
result type
+
what it does
+
+
+
+
+
+
errorsToFile
+
Process => java.nio.file.Path
+
Unit
+
Natively attach STDERR to a file
+
+
+
+
appendErrorsToFile
+
Process => java.nio.file.Path
+
Unit
+
Natively attach STDERR to a file in append mode
+
+
+
+
errorsToSink
+
Process => Pipe[F, Byte, Unit]
+
Unit
+
Drains the STDERR through the given pipe
+
+
+
+
errorsToFoldMonoid
+
Process => [O: Monoid](Pipe[F, Byte, O]
+
O
+
Sends STDERR through the pipe and folds the result using its monoid instance
+
+
+
+
errorsToVector
+
Process => Pipe[F, Byte, O]
+
Vector[O]
+
Sends STDERR through the pipe and collects the results
+
+
+
+
drainErrors
+
Process => Pipe[F, Byte, O]
+
Unit
+
Drains the STDERR through the given pipe
+
+
+
+
foldErrors
+
Process => Pipe[F, Byte, O], R, (R, O) => R
+
R
+
Sends STDERR through the pipe and folds the result using a custom fold function
+
+
+
+
+
Let’s see an example of how this works!
+
+
First we define a queue where we want to send error lines from all the involved
+processes, then we define the two processes separately, connect them with a pipe and
+customize the error redirection where we prefix the parsed lines based on which
+process they came from:
The Process object contains several useful type aliases for writing functions that work with any process by
+only specifying what redirection channels we want unbounded.
+
+
The UnboundProcess represents a process which is fully unbound, no redirection has been done yet. It is
+defined as follows:
\ No newline at end of file
diff --git a/docs/fs2/running.html b/docs/fs2/running.html
new file mode 100644
index 00000000..3fe4432f
--- /dev/null
+++ b/docs/fs2/running.html
@@ -0,0 +1,88 @@
+prox: Running processes
The run method is the simplest one, it starts the process and then blocks the current fiber until it terminates
+
The start method starts the process and returns a fiber packed into a resource. The fiber finishes when the process terminates. Canceling the fiber terminates the process.
+
The startProcess method returns a RunningProcess[O, E] interface that allows advanced some operations
+
+
+
Similarly for a process group, there is a run, a start and a startProcessGroup method but with different result types.
The keys of the maps are the original process values used in the piping operations.
+
\ No newline at end of file
diff --git a/docs/index.html b/docs/index.html
new file mode 100644
index 00000000..99ea5140
--- /dev/null
+++ b/docs/index.html
@@ -0,0 +1,32 @@
+prox: Getting started
\ No newline at end of file
diff --git a/docs/migration.html b/docs/migration.html
new file mode 100644
index 00000000..8fb4921c
--- /dev/null
+++ b/docs/migration.html
@@ -0,0 +1,65 @@
+prox: Migration
The start method on processes now requires a blockingExecutionContext argument
+
Ignore has been renamed to Drain
+
Log has been renamed to ToVector
+
+
+
from 0.2 to 0.4
+
+
+
Process now takes the effect type as parameter, so in case of cats-effect, Process(...) becomes Process[IO](...)
+
The start method on processes now gets a Blocker instead of an execution context
+
+
+
from 0.4 to 0.5
+
+
0.5 is a complete rewrite of the original library, and the API changed a lot, especially
+if the process types were used in code to pass around / wrap them. Please refer to the other
+sections of the documentation to learn how to reimplement them. For simple use cases where
+constructing and running the processes directly the main differences are:
The runner is responsible for stating the native processes and wiring all the redirections together. The default
+implementation is called JVMProcessRunner.
+
+
There are use cases when providing a custom runner makes sense. One such use case could be to launch external processes
+within a docker container in case of running on a development machine (for example from tests), while running them directly
+in production, when the whole service is running within the container.
+
+
We can implement this scenario by using JVMProcessRunner in production and a custom DockerizedProcessRunner in tests,
+where we define the latter as follows:
\ No newline at end of file
diff --git a/docs/zstream/customize.html b/docs/zstream/customize.html
new file mode 100644
index 00000000..b3c935eb
--- /dev/null
+++ b/docs/zstream/customize.html
@@ -0,0 +1,225 @@
+prox: Customizing environment
The type returned by the Process constructor also implements the ProcessConfiguration trait,
+adding three methods that can be used to customize the working environment of the process to be started:
+
+
Working directory
+
+
The in method can be used to customize the working directory:
The subprocess inherits the parent process environment, so it may be necessary to
+remove some already defined environment variables with the without method:
Because these methods are part of the ProcessConfigurationcapability, writing reusable functions require us to define
+a polymorphic function that requires this capability:
\ No newline at end of file
diff --git a/docs/zstream/index.html b/docs/zstream/index.html
new file mode 100644
index 00000000..b3e012f4
--- /dev/null
+++ b/docs/zstream/index.html
@@ -0,0 +1,134 @@
+prox: Getting started
Once we have our process specification ready, we can start the process with one of the
+IO functions on process.
+
+
But for this we first have to have a ProcessRunner implementation in scope. The default
+one is called JVMProcessRunner and it can be created in the following way:
With the runner in place we can use several methods to start the process.
+The simplest one is called run and it blocks the active thread until the process finishes
+running:
The result of this IO action is a ProcessResult[O, E], with the ability to observe the
+exit code and the redirected output and error values. In our first example both O and
+E were Unit because the default is to redirect output and error to the standard output and
+standard error streams.
+
\ No newline at end of file
diff --git a/docs/zstream/processgroups.html b/docs/zstream/processgroups.html
new file mode 100644
index 00000000..2fc98269
--- /dev/null
+++ b/docs/zstream/processgroups.html
@@ -0,0 +1,95 @@
+prox: Process groups
Connecting one process to another means that the standard output of the first process
+gets redirected to the standard input of the second process. This is implemented using
+the redirection capabilities described on the redirection page. The result
+of connecting one process to another is called a process group and it implements the
+trait ProcessGroup[O, E].
+
+
To create a process group, either:
+
+
Use the | or via methods between two unbounded processes
+
Use the | or via methods between an unbounded process group and an unbounded process
+
+
+
It is important that the process group construction must always happen before any redirection,
+the type system enforces this by requiring the involved processes to be UnboundedProcess.
+
+
+
Process.UnboundedProcess is a type alias for a process with all the redirection capabilities
A custom pipe (when using via) can be anything of the type ZStream[any, ProxError, Byte] => ZStream[any, ProxError, Byte]).
+The following not very useful example capitalizes each word coming through:
\ No newline at end of file
diff --git a/docs/zstream/redirection.html b/docs/zstream/redirection.html
new file mode 100644
index 00000000..4bafed59
--- /dev/null
+++ b/docs/zstream/redirection.html
@@ -0,0 +1,471 @@
+prox: Redirection
Similarly to customization, redirection is also implemented with capability traits.
+The ProcessIO type returned by the Process constructor implements all the three redirection capability
+traits:
+
+
+
RedirectableInput marks that the standard input of the process is not bound yet
+
RedirectableOutput marks that the standard output of the process is not bound yet
+
RedirectableError marks that the standard error output of the process is not bound yet
+
+
+
Each of the three channels can be only redirected once. The result type of each redirection method no longer
+implements the given capability.
+
+
Let’s see an example of this (redirection methods are described below on this page):
It is no longer possible to redirect the output of proc2:
+
+
valproc3=proc2>?(ZPipeline.utf8Decode>>>ZPipeline.splitLines)
+// error: value >? is not a member of io.github.vigoo.prox.zstream.Process.ProcessImplO[String]
+// did you mean !>??
+// val proc3 = proc2 >? (ZPipeline.utf8Decode >>> ZPipeline.splitLines)
+// ^^^^^^^^
+
+
+
Many redirection methods have an operator version but all of them have alphanumberic
+variants as well.
+
+
Input redirection
+
Input redirection is enabled by the RedirectableInput trait. The following operations
+are supported:
+
+
+
+
+
operator
+
alternative
+
parameter type
+
what it does
+
+
+
+
+
<
+
fromFile
+
java.nio.file.Path
+
Natively attach a source file to STDIN
+
+
+
<
+
fromStream
+
ZStream[Any, ProxError, Byte]
+
Attach a ZIO byte stream to STDIN
+
+
+
!<
+
fromStream
+
ZStream[Any, ProxError, Byte]
+
Attach a ZIO byte stream to STDIN and flush after each chunk
+
+
+
+
+
Output redirection
+
Output redirection is enabled by the RedirectableOutput trait.
+The following operations are supported:
Sends STDOUT through the stream and folds the result using a custom fold function
+
+
+
+
+
All the variants that accept a stream transformation (ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])) are also usable by directly passing
+a ZPipeline.
+
+
TransformAndSink encapsulates a stream transformation and a unit sink. It is possible to use a sink directly if transformation is not needed.
Sends STDERR through the pipe and folds the result using a custom fold function
+
+
+
+
+
Redirection for process groups
+
Process groups are two or more processes attached together through pipes.
+This connection is internally implemented using the above described redirection capabilities.
+This means that all but the first process has their inputs bound, and all but the last one has
+their outputs bound. Redirection of input and output for a process group is thus a well defined
+operation meaning redirection of input of the first process and redirection of output of the last process.
+
+
For this reason the class created via process piping implements the RedirectableInput and
+RedirectableOutput traits described above.
+
+
For the sake of simplicity the library does not support anymore the fully customizable
+per-process error redirection for process groups, but a reduced but still quite expressive
+version described by the RedirectableErrors trait.
+
+
The methods in this trait define error redirection for all process in the group at once:
Sends STDERR through the stream and folds the result using a custom fold function
+
+
+
+
+
Redirection to file is not possible through this interface as only a single path could be
+provided.
+The result of these redirections is accessible through the ProcessGroupResult interface as
+it is described in the running processes section.
+
+
By using the RedirectableErrors.customizedPerProcess interface (having the type
+RedirectableErrors.CustomizedPerProcess) it is possible to customize the redirection
+targets per process while keeping their types uniform:
+
+
+
+
+
operator
+
alternative
+
parameter type
+
result type
+
what it does
+
+
+
+
+
+
errorsToFile
+
Process => java.nio.file.Path
+
Unit
+
Natively attach STDERR to a file
+
+
+
+
appendErrorsToFile
+
Process => java.nio.file.Path
+
Unit
+
Natively attach STDERR to a file in append mode
+
+
+
+
errorsToSink
+
Process => TransformAndSink[Byte, _]
+
Unit
+
Drains the STDERR through the given sink
+
+
+
+
errorsToFoldMonoid
+
Process => [O: Monoid](ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])
+
O
+
Sends STDERR through the stream and folds the result using its monoid instance
+
+
+
+
errorsToVector
+
Process => ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])
+
Vector[O]
+
Sends STDERR through the stream and collects the results
+
+
+
+
drainErrors
+
Process => ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])
+
Unit
+
Drains the STDERR through the given stream
+
+
+
+
foldErrors
+
Process => ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]), R, (R, O) => R
+
R
+
Sends STDERR through the stream and folds the result using a custom fold function
+
+
+
+
+
Let’s see an example of how this works!
+
+
First we define a queue where we want to send error lines from all the involved
+processes, then we define the two processes separately, connect them with a pipe and
+customize the error redirection where we prefix the parsed lines based on which
+process they came from:
The Process object contains several useful type aliases for writing functions that work with any process by
+only specifying what redirection channels we want unbounded.
+
+
The UnboundProcess represents a process which is fully unbound, no redirection has been done yet. It is
+defined as follows:
\ No newline at end of file
diff --git a/docs/zstream/running.html b/docs/zstream/running.html
new file mode 100644
index 00000000..08f41427
--- /dev/null
+++ b/docs/zstream/running.html
@@ -0,0 +1,90 @@
+prox: Running processes
The run method is the simplest one, it starts the process and then blocks the current fiber until it terminates
+
The start method starts the process and returns a fiber packed into a resource. The fiber finishes when the process terminates. Canceling the fiber terminates the process.
+
The startProcess method returns a RunningProcess[O, E] interface that allows advanced some operations
+
+
+
Similarly for a process group, there is a run, a start and a startProcessGroup method but with different result types.
The keys of the maps are the original process values used in the piping operations.
+
\ No newline at end of file
diff --git a/examples/externalpyproc/init.sh b/examples/externalpyproc/init.sh
deleted file mode 100755
index 86de7abc..00000000
--- a/examples/externalpyproc/init.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env bash
-
-set -eo pipefail
-IFS=$'\n\t'
-
-rm -rf virtualenv
-virtualenv --setuptools --no-site-packages -p python2.7 virtualenv
diff --git a/examples/externalpyproc/test.py b/examples/externalpyproc/test.py
deleted file mode 100644
index f7199b5a..00000000
--- a/examples/externalpyproc/test.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import sys
-
-def run():
- stop = False
-
- while not stop:
- line = sys.stdin.readline().strip()
-
- if len(line) == 0:
- stop = True
- else:
- print line + "!?!?"
- sys.stdout.flush()
-
-run()
diff --git a/highlight/LICENSE b/highlight/LICENSE
new file mode 100644
index 00000000..2250cc7e
--- /dev/null
+++ b/highlight/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2006, Ivan Sagalaev.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/highlight/highlight.pack.js b/highlight/highlight.pack.js
new file mode 100644
index 00000000..d826a435
--- /dev/null
+++ b/highlight/highlight.pack.js
@@ -0,0 +1,2 @@
+/*! highlight.js v9.16.2 | BSD3 License | git.io/hljslicense */
+!function(e){var n="object"==typeof window&&window||"object"==typeof self&&self;"undefined"==typeof exports||exports.nodeType?n&&(n.hljs=e({}),"function"==typeof define&&define.amd&&define([],function(){return n.hljs})):e(exports)}(function(a){var f=[],i=Object.keys,b={},u={},n=/^(no-?highlight|plain|text)$/i,l=/\blang(?:uage)?-([\w-]+)\b/i,t=/((^(<[^>]+>|\t|)+|(?:\n)))/gm,r={case_insensitive:"cI",lexemes:"l",contains:"c",keywords:"k",subLanguage:"sL",className:"cN",begin:"b",beginKeywords:"bK",end:"e",endsWithParent:"eW",illegal:"i",excludeBegin:"eB",excludeEnd:"eE",returnBegin:"rB",returnEnd:"rE",variants:"v",IDENT_RE:"IR",UNDERSCORE_IDENT_RE:"UIR",NUMBER_RE:"NR",C_NUMBER_RE:"CNR",BINARY_NUMBER_RE:"BNR",RE_STARTERS_RE:"RSR",BACKSLASH_ESCAPE:"BE",APOS_STRING_MODE:"ASM",QUOTE_STRING_MODE:"QSM",PHRASAL_WORDS_MODE:"PWM",C_LINE_COMMENT_MODE:"CLCM",C_BLOCK_COMMENT_MODE:"CBCM",HASH_COMMENT_MODE:"HCM",NUMBER_MODE:"NM",C_NUMBER_MODE:"CNM",BINARY_NUMBER_MODE:"BNM",CSS_NUMBER_MODE:"CSSNM",REGEXP_MODE:"RM",TITLE_MODE:"TM",UNDERSCORE_TITLE_MODE:"UTM",COMMENT:"C",beginRe:"bR",endRe:"eR",illegalRe:"iR",lexemesRe:"lR",terminators:"t",terminator_end:"tE"},_="",m={classPrefix:"hljs-",tabReplace:null,useBR:!1,languages:void 0},c="of and for in not or if then".split(" ");function C(e){return e.replace(/&/g,"&").replace(//g,">")}function E(e){return e.nodeName.toLowerCase()}function o(e){return n.test(e)}function s(e){var n,t={},r=Array.prototype.slice.call(arguments,1);for(n in e)t[n]=e[n];return r.forEach(function(e){for(n in e)t[n]=e[n]}),t}function g(e){var a=[];return function e(n,t){for(var r=n.firstChild;r;r=r.nextSibling)3===r.nodeType?t+=r.nodeValue.length:1===r.nodeType&&(a.push({event:"start",offset:t,node:r}),t=e(r,t),E(r).match(/br|hr|img|input/)||a.push({event:"stop",offset:t,node:r}));return t}(e,0),a}function d(e,n,t){var r=0,a="",i=[];function c(){return e.length&&n.length?e[0].offset!==n[0].offset?e[0].offset"}function l(e){a+=""+E(e)+">"}function 
o(e){("start"===e.event?u:l)(e.node)}for(;e.length||n.length;){var s=c();if(a+=C(t.substring(r,s[0].offset)),r=s[0].offset,s===e){for(i.reverse().forEach(l);o(s.splice(0,1)[0]),(s=c())===e&&s.length&&s[0].offset===r;);i.reverse().forEach(u)}else"start"===s[0].event?i.push(s[0].node):i.pop(),o(s.splice(0,1)[0])}return a+C(t.substr(r))}function R(n){return n.v&&!n.cached_variants&&(n.cached_variants=n.v.map(function(e){return s(n,{v:null},e)})),n.cached_variants?n.cached_variants:function e(n){return!!n&&(n.eW||e(n.starts))}(n)?[s(n,{starts:n.starts?s(n.starts):null})]:[n]}function v(e){if(r&&!e.langApiRestored){for(var n in e.langApiRestored=!0,r)e[n]&&(e[r[n]]=e[n]);(e.c||[]).concat(e.v||[]).forEach(v)}}function p(n,r){var a={};return"string"==typeof n?t("keyword",n):i(n).forEach(function(e){t(e,n[e])}),a;function t(t,e){r&&(e=e.toLowerCase()),e.split(" ").forEach(function(e){var n=e.split("|");a[n[0]]=[t,function(e,n){return n?Number(n):function(e){return-1!=c.indexOf(e.toLowerCase())}(e)?0:1}(n[0],n[1])]})}}function O(r){function s(e){return e&&e.source||e}function f(e,n){return new RegExp(s(e),"m"+(r.cI?"i":"")+(n?"g":""))}function a(a){var i,e,c={},u=[],l={},t=1;function n(e,n){c[t]=e,u.push([e,n]),t+=function(e){return new RegExp(e.toString()+"|").exec("").length-1}(n)+1}for(var r=0;r')+n+(t?"":_)}function l(){R+=null!=g.sL?function(){var e="string"==typeof g.sL;if(e&&!b[g.sL])return C(v);var n=e?x(g.sL,v,!0,d[g.sL]):B(v,g.sL.length?g.sL:void 0);return 0")+'"');if("end"===n.type){var r=function(e){var n=e[0],t=c(g,n);if(t){var r=g;for(r.skip?v+=n:(r.rE||r.eE||(v+=n),l(),r.eE&&(v=n));g.cN&&(R+=_),g.skip||g.sL||(p+=g.relevance),(g=g.parent)!==t.parent;);return t.starts&&(t.endSameAsBegin&&(t.starts.eR=t.eR),o(t.starts)),r.rE?0:n.length}}(n);if(null!=r)return r}return v+=t,t.length}var E=S(e);if(!E)throw new Error('Unknown language: "'+e+'"');O(E);var r,g=n||E,d={},R="";for(r=g;r!==E;r=r.parent)r.cN&&(R=u(r.cN,"",!0)+R);var v="",p=0;try{for(var 
M,N,h=0;g.t.lastIndex=h,M=g.t.exec(a);)N=t(a.substring(h,M.index),M),h=M.index+N;for(t(a.substr(h)),r=g;r.parent;r=r.parent)r.cN&&(R+=_);return{relevance:p,value:R,i:!1,language:e,top:g}}catch(e){if(e.message&&-1!==e.message.indexOf("Illegal"))return{i:!0,relevance:0,value:C(a)};throw e}}function B(t,e){e=e||m.languages||i(b);var r={relevance:0,value:C(t)},a=r;return e.filter(S).filter(T).forEach(function(e){var n=x(e,t,!1);n.language=e,n.relevance>a.relevance&&(a=n),n.relevance>r.relevance&&(a=r,r=n)}),a.language&&(r.second_best=a),r}function M(e){return m.tabReplace||m.useBR?e.replace(t,function(e,n){return m.useBR&&"\n"===e?" ":m.tabReplace?n.replace(/\t/g,m.tabReplace):""}):e}function N(e){var n,t,r,a,i,c=function(e){var n,t,r,a,i=e.className+" ";if(i+=e.parentNode?e.parentNode.className:"",t=l.exec(i))return S(t[1])?t[1]:"no-highlight";for(n=0,r=(i=i.split(/\s+/)).length;n/g,"\n"):n=e,i=n.textContent,r=c?x(c,i,!0):B(i),(t=g(n)).length&&((a=document.createElementNS("http://www.w3.org/1999/xhtml","div")).innerHTML=r.value,r.value=d(t,g(a),i)),r.value=M(r.value),e.innerHTML=r.value,e.className=function(e,n,t){var r=n?u[n]:t,a=[e.trim()];return e.match(/\bhljs\b/)||a.push("hljs"),-1===e.indexOf(r)&&a.push(r),a.join(" ").trim()}(e.className,c,r.language),e.result={language:r.language,re:r.relevance},r.second_best&&(e.second_best={language:r.second_best.language,re:r.second_best.relevance}))}function h(){if(!h.called){h.called=!0;var e=document.querySelectorAll("pre code");f.forEach.call(e,N)}}function S(e){return e=(e||"").toLowerCase(),b[e]||b[u[e]]}function T(e){var n=S(e);return n&&!n.disableAutodetect}return a.highlight=x,a.highlightAuto=B,a.fixMarkup=M,a.highlightBlock=N,a.configure=function(e){m=s(m,e)},a.initHighlighting=h,a.initHighlightingOnLoad=function(){addEventListener("DOMContentLoaded",h,!1),addEventListener("load",h,!1)},a.registerLanguage=function(n,e){var 
t=b[n]=e(a);v(t),t.rawDefinition=e.bind(null,a),t.aliases&&t.aliases.forEach(function(e){u[e]=n})},a.listLanguages=function(){return i(b)},a.getLanguage=S,a.autoDetection=T,a.inherit=s,a.IR=a.IDENT_RE="[a-zA-Z]\\w*",a.UIR=a.UNDERSCORE_IDENT_RE="[a-zA-Z_]\\w*",a.NR=a.NUMBER_RE="\\b\\d+(\\.\\d+)?",a.CNR=a.C_NUMBER_RE="(-?)(\\b0[xX][a-fA-F0-9]+|(\\b\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?)",a.BNR=a.BINARY_NUMBER_RE="\\b(0b[01]+)",a.RSR=a.RE_STARTERS_RE="!|!=|!==|%|%=|&|&&|&=|\\*|\\*=|\\+|\\+=|,|-|-=|/=|/|:|;|<<|<<=|<=|<|===|==|=|>>>=|>>=|>=|>>>|>>|>|\\?|\\[|\\{|\\(|\\^|\\^=|\\||\\|=|\\|\\||~",a.BE=a.BACKSLASH_ESCAPE={b:"\\\\[\\s\\S]",relevance:0},a.ASM=a.APOS_STRING_MODE={cN:"string",b:"'",e:"'",i:"\\n",c:[a.BE]},a.QSM=a.QUOTE_STRING_MODE={cN:"string",b:'"',e:'"',i:"\\n",c:[a.BE]},a.PWM=a.PHRASAL_WORDS_MODE={b:/\b(a|an|the|are|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such|will|you|your|they|like|more)\b/},a.C=a.COMMENT=function(e,n,t){var r=a.inherit({cN:"comment",b:e,e:n,c:[]},t||{});return r.c.push(a.PWM),r.c.push({cN:"doctag",b:"(?:TODO|FIXME|NOTE|BUG|XXX):",relevance:0}),r},a.CLCM=a.C_LINE_COMMENT_MODE=a.C("//","$"),a.CBCM=a.C_BLOCK_COMMENT_MODE=a.C("/\\*","\\*/"),a.HCM=a.HASH_COMMENT_MODE=a.C("#","$"),a.NM=a.NUMBER_MODE={cN:"number",b:a.NR,relevance:0},a.CNM=a.C_NUMBER_MODE={cN:"number",b:a.CNR,relevance:0},a.BNM=a.BINARY_NUMBER_MODE={cN:"number",b:a.BNR,relevance:0},a.CSSNM=a.CSS_NUMBER_MODE={cN:"number",b:a.NR+"(%|em|ex|ch|rem|vw|vh|vmin|vmax|cm|mm|in|pt|pc|px|deg|grad|rad|turn|s|ms|Hz|kHz|dpi|dpcm|dppx)?",relevance:0},a.RM=a.REGEXP_MODE={cN:"regexp",b:/\//,e:/\/[gimuy]*/,i:/\n/,c:[a.BE,{b:/\[/,e:/\]/,relevance:0,c:[a.BE]}]},a.TM=a.TITLE_MODE={cN:"title",b:a.IR,relevance:0},a.UTM=a.UNDERSCORE_TITLE_MODE={cN:"title",b:a.UIR,relevance:0},a.METHOD_GUARD={b:"\\.\\s*"+a.UIR,relevance:0},a});hljs.registerLanguage("java",function(e){var a="false synchronized int abstract float private char boolean var static null if const 
for true while long strictfp finally protected import native final void enum else break transient catch instanceof byte super volatile case assert short package default double public try this switch continue throws protected public private module requires exports do",t={cN:"number",b:"\\b(0[bB]([01]+[01_]+[01]+|[01]+)|0[xX]([a-fA-F0-9]+[a-fA-F0-9_]+[a-fA-F0-9]+|[a-fA-F0-9]+)|(([\\d]+[\\d_]+[\\d]+|[\\d]+)(\\.([\\d]+[\\d_]+[\\d]+|[\\d]+))?|\\.([\\d]+[\\d_]+[\\d]+|[\\d]+))([eE][-+]?\\d+)?)[lLfF]?",relevance:0};return{aliases:["jsp"],k:a,i:/<\/|#/,c:[e.C("/\\*\\*","\\*/",{relevance:0,c:[{b:/\w+@/,relevance:0},{cN:"doctag",b:"@[A-Za-z]+"}]}),e.CLCM,e.CBCM,e.ASM,e.QSM,{cN:"class",bK:"class interface",e:/[{;=]/,eE:!0,k:"class interface",i:/[:"\[\]]/,c:[{bK:"extends implements"},e.UTM]},{bK:"new throw return else",relevance:0},{cN:"function",b:"([À-ʸa-zA-Z_$][À-ʸa-zA-Z_$0-9]*(<[À-ʸa-zA-Z_$][À-ʸa-zA-Z_$0-9]*(\\s*,\\s*[À-ʸa-zA-Z_$][À-ʸa-zA-Z_$0-9]*)*>)?\\s+)+"+e.UIR+"\\s*\\(",rB:!0,e:/[{;=]/,eE:!0,k:a,c:[{b:e.UIR+"\\s*\\(",rB:!0,relevance:0,c:[e.UTM]},{cN:"params",b:/\(/,e:/\)/,k:a,relevance:0,c:[e.ASM,e.QSM,e.CNM,e.CBCM]},e.CLCM,e.CBCM]},t,{cN:"meta",b:"@[A-Za-z]+"}]}});hljs.registerLanguage("scala",function(e){var t={cN:"subst",v:[{b:"\\$[A-Za-z0-9_]+"},{b:"\\${",e:"}"}]},a={cN:"string",v:[{b:'"',e:'"',i:"\\n",c:[e.BE]},{b:'"""',e:'"""',relevance:10},{b:'[a-z]+"',e:'"',i:"\\n",c:[e.BE,t]},{cN:"string",b:'[a-z]+"""',e:'"""',c:[t],relevance:10}]},c={cN:"type",b:"\\b[A-Z][A-Za-z0-9_]*",relevance:0},r={cN:"title",b:/[^0-9\n\t "'(),.`{}\[\]:;][^\n\t "'(),.`{}\[\]:;]+|[^0-9\n\t "'(),.`{}\[\]:;=]/,relevance:0},n={cN:"class",bK:"class object trait type",e:/[:={\[\n;]/,eE:!0,c:[{bK:"extends with",relevance:10},{b:/\[/,e:/\]/,eB:!0,eE:!0,relevance:0,c:[c]},{cN:"params",b:/\(/,e:/\)/,eB:!0,eE:!0,relevance:0,c:[c]},r]},l={cN:"function",bK:"def",e:/[:={\[(\n;]/,eE:!0,c:[r]};return{k:{literal:"true false null",keyword:"type yield lazy override def with val var sealed abstract private 
trait object if forSome for while throw finally protected extends import final return else break new catch super class case package default try this match continue throws implicit"},c:[e.CLCM,e.CBCM,a,{cN:"symbol",b:"'\\w[\\w\\d_]*(?!')"},c,l,n,e.CNM,{cN:"meta",b:"@[A-Za-z]+"}]}});hljs.registerLanguage("bash",function(e){var t={cN:"variable",v:[{b:/\$[\w\d#@][\w\d_]*/},{b:/\$\{(.*?)}/}]},a={cN:"string",b:/"/,e:/"/,c:[e.BE,t,{cN:"variable",b:/\$\(/,e:/\)/,c:[e.BE]}]};return{aliases:["sh","zsh"],l:/\b-?[a-z\._]+\b/,k:{keyword:"if then else elif fi for while in do done case esac function",literal:"true false",built_in:"break cd continue eval exec exit export getopts hash pwd readonly return shift test times trap umask unset alias bind builtin caller command declare echo enable help let local logout mapfile printf read readarray source type typeset ulimit unalias set shopt autoload bg bindkey bye cap chdir clone comparguments compcall compctl compdescribe compfiles compgroups compquote comptags comptry compvalues dirs disable disown echotc echoti emulate fc fg float functions getcap getln history integer jobs kill limit log noglob popd print pushd pushln rehash sched setcap setopt stat suspend ttyctl unfunction unhash unlimit unsetopt vared wait whence where which zcompile zformat zftp zle zmodload zparseopts zprof zpty zregexparse zsocket zstyle ztcp",_:"-ne -eq -lt -gt -f -d -e -s -l -a"},c:[{cN:"meta",b:/^#![^\n]+sh\s*$/,relevance:10},{cN:"function",b:/\w[\w\d_]*\s*\(\s*\)\s*\{/,rB:!0,c:[e.inherit(e.TM,{b:/\w[\w\d_]*/})],relevance:0},e.HCM,a,{cN:"",b:/\\"/},{cN:"string",b:/'/,e:/'/},t]}});
\ No newline at end of file
diff --git a/highlight/styles/vs.css b/highlight/styles/vs.css
new file mode 100644
index 00000000..c5d07d31
--- /dev/null
+++ b/highlight/styles/vs.css
@@ -0,0 +1,68 @@
+/*
+
+Visual Studio-like style based on original C# coloring by Jason Diamond
+
+*/
+.hljs {
+ display: block;
+ overflow-x: auto;
+ padding: 0.5em;
+ background: white;
+ color: black;
+}
+
+.hljs-comment,
+.hljs-quote,
+.hljs-variable {
+ color: #008000;
+}
+
+.hljs-keyword,
+.hljs-selector-tag,
+.hljs-built_in,
+.hljs-name,
+.hljs-tag {
+ color: #00f;
+}
+
+.hljs-string,
+.hljs-title,
+.hljs-section,
+.hljs-attribute,
+.hljs-literal,
+.hljs-template-tag,
+.hljs-template-variable,
+.hljs-type,
+.hljs-addition {
+ color: #a31515;
+}
+
+.hljs-deletion,
+.hljs-selector-attr,
+.hljs-selector-pseudo,
+.hljs-meta {
+ color: #2b91af;
+}
+
+.hljs-doctag {
+ color: #808080;
+}
+
+.hljs-attr {
+ color: #f00;
+}
+
+.hljs-symbol,
+.hljs-bullet,
+.hljs-link {
+ color: #00b0e8;
+}
+
+
+.hljs-emphasis {
+ font-style: italic;
+}
+
+.hljs-strong {
+ font-weight: bold;
+}
diff --git a/img/favicon114x114.png b/img/favicon114x114.png
new file mode 100644
index 00000000..a1c6ff0e
Binary files /dev/null and b/img/favicon114x114.png differ
diff --git a/img/favicon120x120.png b/img/favicon120x120.png
new file mode 100644
index 00000000..32f480e0
Binary files /dev/null and b/img/favicon120x120.png differ
diff --git a/img/favicon128x128.png b/img/favicon128x128.png
new file mode 100644
index 00000000..a19298e8
Binary files /dev/null and b/img/favicon128x128.png differ
diff --git a/img/favicon144x144.png b/img/favicon144x144.png
new file mode 100644
index 00000000..305a0f00
Binary files /dev/null and b/img/favicon144x144.png differ
diff --git a/img/favicon150x150.png b/img/favicon150x150.png
new file mode 100644
index 00000000..6ed8c398
Binary files /dev/null and b/img/favicon150x150.png differ
diff --git a/img/favicon152x152.png b/img/favicon152x152.png
new file mode 100644
index 00000000..588ef3be
Binary files /dev/null and b/img/favicon152x152.png differ
diff --git a/img/favicon16x16.png b/img/favicon16x16.png
new file mode 100644
index 00000000..c2855832
Binary files /dev/null and b/img/favicon16x16.png differ
diff --git a/img/favicon196x196.png b/img/favicon196x196.png
new file mode 100644
index 00000000..f9056813
Binary files /dev/null and b/img/favicon196x196.png differ
diff --git a/img/favicon24x24.png b/img/favicon24x24.png
new file mode 100644
index 00000000..a902555c
Binary files /dev/null and b/img/favicon24x24.png differ
diff --git a/img/favicon310x150.png b/img/favicon310x150.png
new file mode 100644
index 00000000..59d939b3
Binary files /dev/null and b/img/favicon310x150.png differ
diff --git a/img/favicon310x310.png b/img/favicon310x310.png
new file mode 100644
index 00000000..c0f49c4b
Binary files /dev/null and b/img/favicon310x310.png differ
diff --git a/img/favicon32x32.png b/img/favicon32x32.png
new file mode 100644
index 00000000..eeb94977
Binary files /dev/null and b/img/favicon32x32.png differ
diff --git a/img/favicon48x48.png b/img/favicon48x48.png
new file mode 100644
index 00000000..47191429
Binary files /dev/null and b/img/favicon48x48.png differ
diff --git a/img/favicon57x57.png b/img/favicon57x57.png
new file mode 100644
index 00000000..674665d9
Binary files /dev/null and b/img/favicon57x57.png differ
diff --git a/img/favicon60x60.png b/img/favicon60x60.png
new file mode 100644
index 00000000..53ce2de1
Binary files /dev/null and b/img/favicon60x60.png differ
diff --git a/img/favicon64x64.png b/img/favicon64x64.png
new file mode 100644
index 00000000..4d617902
Binary files /dev/null and b/img/favicon64x64.png differ
diff --git a/img/favicon70x70.png b/img/favicon70x70.png
new file mode 100644
index 00000000..e8f7b461
Binary files /dev/null and b/img/favicon70x70.png differ
diff --git a/img/favicon72x72.png b/img/favicon72x72.png
new file mode 100644
index 00000000..07c5a7e0
Binary files /dev/null and b/img/favicon72x72.png differ
diff --git a/img/favicon76x76.png b/img/favicon76x76.png
new file mode 100644
index 00000000..de28d7a5
Binary files /dev/null and b/img/favicon76x76.png differ
diff --git a/img/favicon96x96.png b/img/favicon96x96.png
new file mode 100644
index 00000000..6e9a7513
Binary files /dev/null and b/img/favicon96x96.png differ
diff --git a/img/features-header.svg b/img/features-header.svg
new file mode 100644
index 00000000..fe2f1539
--- /dev/null
+++ b/img/features-header.svg
@@ -0,0 +1,83 @@
+
+
\ No newline at end of file
diff --git a/img/first-feature-icon.svg b/img/first-feature-icon.svg
new file mode 100644
index 00000000..db2f82c7
--- /dev/null
+++ b/img/first-feature-icon.svg
@@ -0,0 +1,15 @@
+
+
\ No newline at end of file
diff --git a/img/first_icon.png b/img/first_icon.png
new file mode 100644
index 00000000..0283901e
Binary files /dev/null and b/img/first_icon.png differ
diff --git a/img/first_icon2x.png b/img/first_icon2x.png
new file mode 100644
index 00000000..4a0644df
Binary files /dev/null and b/img/first_icon2x.png differ
diff --git a/img/jumbotron_pattern.png b/img/jumbotron_pattern.png
new file mode 100644
index 00000000..ec5dde12
Binary files /dev/null and b/img/jumbotron_pattern.png differ
diff --git a/img/jumbotron_pattern2x.png b/img/jumbotron_pattern2x.png
new file mode 100644
index 00000000..0121a2e7
Binary files /dev/null and b/img/jumbotron_pattern2x.png differ
diff --git a/img/light-navbar-brand.svg b/img/light-navbar-brand.svg
new file mode 100644
index 00000000..449f9f6c
--- /dev/null
+++ b/img/light-navbar-brand.svg
@@ -0,0 +1,41 @@
+
+
diff --git a/img/light-sidebar-brand.svg b/img/light-sidebar-brand.svg
new file mode 100644
index 00000000..bd22d534
--- /dev/null
+++ b/img/light-sidebar-brand.svg
@@ -0,0 +1,41 @@
+
+
diff --git a/img/light_navbar_brand.png b/img/light_navbar_brand.png
new file mode 100644
index 00000000..da8752bb
Binary files /dev/null and b/img/light_navbar_brand.png differ
diff --git a/img/navbar_brand.png b/img/navbar_brand.png
new file mode 100644
index 00000000..1c2333c3
Binary files /dev/null and b/img/navbar_brand.png differ
diff --git a/img/navbar_brand2x.png b/img/navbar_brand2x.png
new file mode 100644
index 00000000..40b50ced
Binary files /dev/null and b/img/navbar_brand2x.png differ
diff --git a/docs/src/microsite/img/second-feature-icon.svg b/img/second-feature-icon.svg
similarity index 100%
rename from docs/src/microsite/img/second-feature-icon.svg
rename to img/second-feature-icon.svg
diff --git a/img/second_icon.png b/img/second_icon.png
new file mode 100644
index 00000000..0c7749cb
Binary files /dev/null and b/img/second_icon.png differ
diff --git a/img/second_icon2x.png b/img/second_icon2x.png
new file mode 100644
index 00000000..439b643c
Binary files /dev/null and b/img/second_icon2x.png differ
diff --git a/img/sidebar_brand.png b/img/sidebar_brand.png
new file mode 100644
index 00000000..8c4a1f6d
Binary files /dev/null and b/img/sidebar_brand.png differ
diff --git a/img/sidebar_brand2x.png b/img/sidebar_brand2x.png
new file mode 100644
index 00000000..652837d4
Binary files /dev/null and b/img/sidebar_brand2x.png differ
diff --git a/docs/src/microsite/img/third-feature-icon.svg b/img/third-feature-icon.svg
similarity index 100%
rename from docs/src/microsite/img/third-feature-icon.svg
rename to img/third-feature-icon.svg
diff --git a/img/third_icon.png b/img/third_icon.png
new file mode 100644
index 00000000..ba07d7c1
Binary files /dev/null and b/img/third_icon.png differ
diff --git a/img/third_icon2x.png b/img/third_icon2x.png
new file mode 100644
index 00000000..030e8f42
Binary files /dev/null and b/img/third_icon2x.png differ
diff --git a/index.html b/index.html
new file mode 100644
index 00000000..88dd88fe
--- /dev/null
+++ b/index.html
@@ -0,0 +1,32 @@
+prox: prox: Home
\ No newline at end of file
diff --git a/js/docs.js b/js/docs.js
new file mode 100644
index 00000000..49eb7f42
--- /dev/null
+++ b/js/docs.js
@@ -0,0 +1,163 @@
+/**
+ * Toggle an specific class to the received DOM element.
+ * @param {string} elemSelector The query selector specifying the target element.
+ * @param {string} [activeClass='active'] The class to be applied/removed.
+ */
+function toggleClass(elemSelector, activeClass = "active") {
+ const elem = document.querySelector(elemSelector);
+ if (elem) {
+ elem.classList.toggle(activeClass);
+ }
+}
+
+/**
+ * Toggle specific classes to an array of corresponding DOM elements.
+ * @param {Array} elemSelectors The query selectors specifying the target elements.
+ * @param {Array} activeClasses The classes to be applied/removed.
+ */
+function toggleClasses(elemSelectors, activeClasses) {
+ elemSelectors.map((elemSelector, idx) => {
+ toggleClass(elemSelector, activeClasses[idx]);
+ });
+}
+
+/**
+ * Remove active class from siblings DOM elements and apply it to event target.
+ * @param {Element} element The element receiving the class, and whose siblings will lose it.
+ * @param {string} [activeClass='active'] The class to be applied.
+ */
+function activate(element, activeClass = "active") {
+ [...element.parentNode.children].map(elem =>
+ elem.classList.remove(activeClass)
+ );
+ element.classList.add(activeClass);
+}
+
+/**
+ * Remove active class from siblings parent DOM elements and apply it to element target parent.
+ * @param {Element} element The element receiving the class, and whose siblings will lose it.
+ * @param {string} [activeClass='active'] The class to be applied.
+ */
+function activateParent(element, activeClass = "active") {
+ const elemParent = element.parentNode;
+ activate(elemParent, activeClass);
+}
+
+/**
+ * Remove active class from siblings parent DOM elements and apply it to element target parent.
+ * @param {Element} element The element receiving the class, and whose siblings will lose it.
+ * @param {string} [activeClass='active'] The class to be applied.
+ */
+function toggleParent(element, activeClass = "active") {
+ const elemParent = element.parentNode;
+ if (elemParent) {
+ elemParent.classList.toggle(activeClass);
+ }
+}
+
+/**
+ * This will make the specified elements click event to show/hide the menu sidebar.
+ */
+function activateToggle() {
+ const menuToggles = document.querySelectorAll("#menu-toggle, #main-toggle");
+ if (menuToggles) {
+ [...menuToggles].map(elem => {
+ elem.onclick = e => {
+ e.preventDefault();
+ toggleClass("#wrapper", "toggled");
+ };
+ });
+ }
+}
+
+/**
+ * This will make the specified elements click event to behave as a menu
+ * parent entry, or a link, or sometimes both, depending on the context.
+ */
+function activateMenuNesting() {
+ const menuParents = document.querySelectorAll(".drop-nested");
+ if (menuParents) {
+ [...menuParents].map(elem => {
+ elem.onclick = e => {
+ e.preventDefault();
+ toggleParent(elem, "open");
+ const elementType = e.currentTarget.tagName.toLowerCase();
+ if (elementType === "a") {
+ const linkElement = e.currentTarget;
+ const linkElementParent = linkElement.parentNode;
+ const destination = linkElement.href;
+ if (
+ destination !== window.location.href &&
+ !linkElementParent.classList.contains("active")
+ ) {
+ window.location.href = destination;
+ }
+ }
+ };
+ });
+ }
+}
+
+/**
+ * Aux function to retrieve repository stars and watchers count info from
+ * GitHub API and set it on its proper nodes.
+ */
+async function loadGitHubStats() {
+ const content = document.querySelector("#content");
+ const ghOwner = content.dataset.githubOwner;
+ const ghRepo = content.dataset.githubRepo;
+
+ if (ghOwner && ghRepo) {
+ const ghAPI = `https://api.github.com/repos/${ghOwner}/${ghRepo}`;
+ const ghDataResponse = await fetch(ghAPI);
+ const ghData = await ghDataResponse.json();
+ const watchersElement = document.querySelector("#eyes");
+ const starsElement = document.querySelector("#stars");
+ watchersElement.textContent = ghData.subscribers_count;
+ starsElement.textContent = ghData.stargazers_count;
+ }
+}
+
+/**
+ * Function to create an anchor with an specific id
+ * @param {string} id The corresponding id from which the href will be created.
+ * @returns {Element} The new created anchor.
+ */
+function anchorForId(id) {
+ const anchor = document.createElement("a");
+ anchor.className = "header-link";
+ anchor.href = `#${id}`;
+ anchor.innerHTML = '';
+ return anchor;
+}
+
+/**
+ * Aux function to retrieve repository stars and watchers count info from
+ * @param {string} level The specific level to select header from.
+ * @param {Element} containingElement The element receiving the anchor.
+ */
+function linkifyAnchors(level, containingElement) {
+ const headers = containingElement.getElementsByTagName(`h${level}`);
+ [...headers].map(header => {
+ if (typeof header.id !== "undefined" && header.id !== "") {
+ header.append(anchorForId(header.id));
+ }
+ });
+}
+
+/**
+ * Function
+ */
+function linkifyAllLevels() {
+ const content = document.querySelector("#content");
+ [...Array(7).keys()].map(level => {
+ linkifyAnchors(level, content);
+ });
+}
+
+window.addEventListener("DOMContentLoaded", () => {
+ activateToggle();
+ activateMenuNesting();
+ loadGitHubStats();
+ linkifyAllLevels();
+});
diff --git a/js/main.js b/js/main.js
new file mode 100644
index 00000000..01865e18
--- /dev/null
+++ b/js/main.js
@@ -0,0 +1,73 @@
+jQuery(document).ready(function() {
+ hljs.initHighlightingOnLoad();
+ activeToggle();
+ loadGitHubStats();
+ linkifyAllLevels(".docs .content-wrapper");
+});
+
+
+function activeToggle() {
+ $("#menu-toggle").click(function(e) {
+ e.preventDefault();
+ $("#wrapper").toggleClass("toggled");
+ });
+}
+
+var anchorForId = function (id) {
+ var anchor = document.createElement("a");
+ anchor.className = "header-link";
+ anchor.href = "#" + id;
+ anchor.innerHTML = "";
+ return anchor;
+};
+
+var linkifyAnchors = function (level, containingElement) {
+ var headers = containingElement.getElementsByTagName("h" + level);
+ for (var h = 0; h < headers.length; h++) {
+ var header = headers[h];
+
+ if (typeof header.id !== "undefined" && header.id !== "") {
+ header.appendChild(anchorForId(header.id));
+ }
+ }
+};
+
+var linkifyAllLevels = function (blockSelector) {
+ var contentBlock = document.querySelector(blockSelector);
+ if (!contentBlock) {
+ return;
+ }
+ for (var level = 1; level <= 6; level++) {
+ linkifyAnchors(level, contentBlock);
+ }
+};
+
+var baseURL = window.location.href;
+
+function shareSiteFacebook(text) {
+ launchPopup('http://www.facebook.com/sharer/sharer.php?u='+baseURL+'&t=' + text);
+}
+
+function shareSiteTwitter(text) {
+ launchPopup('https://twitter.com/home?status=' + text);
+ return false;
+}
+
+function launchPopup(url) {
+ window.open(url, 'Social Share', 'height=320, width=640, toolbar=no, menubar=no, scrollbars=no, resizable=no, location=no, directories=no, status=no');
+}
+
+function loadGitHubStats() {
+ var content = $("#content");
+ var githubOwner = content.attr("data-github-owner")
+ var githubRepo = content.attr("data-github-repo")
+
+ if(githubOwner && githubRepo) {
+ var gitHubAPI = "https://api.github.com/repos/" + githubOwner + "/" + githubRepo + "?callback=?";
+ $.getJSON(gitHubAPI).done(function(data) {
+ $('#eyes').text(data.data.subscribers_count);
+ $('#stars').text(data.data.stargazers_count);
+ });
+ }
+
+}
diff --git a/js/search.js b/js/search.js
new file mode 100644
index 00000000..661db1e7
--- /dev/null
+++ b/js/search.js
@@ -0,0 +1,265 @@
+// When the user clicks on the search box, we want to toggle the search dropdown
+function displayToggleSearch(e) { // Click handler for the search box: lazily builds the index, then shows the dropdown.
+ e.preventDefault();
+ e.stopPropagation(); // keep this click from reaching the document-level close handler registered below
+
+ closeDropdownSearch(e); // close any dropdown that is already open -- defined elsewhere in this file
+
+ if (idx === null) { // build the search index only once, on first open; prepareIdxAndDocMap presumably sets idx -- verify
+ console.log("Building search index...");
+ prepareIdxAndDocMap();
+ console.log("Search index built.");
+ }
+ const dropdown = document.querySelector("#search-dropdown-content");
+ if (dropdown) {
+ if (!dropdown.classList.contains("show")) {
+ dropdown.classList.add("show");
+ }
+ document.addEventListener("click", closeDropdownSearch); // clicking anywhere else closes the dropdown
+ document.addEventListener("keydown", searchOnKeyDown);
+ document.addEventListener("keyup", searchOnKeyUp);
+ }
+}
+
+// We build the search index lazily -- only after the first click on the search bar.
+var idx = null // the search index; null until prepareIdxAndDocMap() runs
+const docMap = new Map() // maps search-result references back to document metadata -- TODO confirm key type
+
+function prepareIdxAndDocMap() {
+ const docs = [
+ {
+ "title": "Blog posts",
+ "url": "/prox/docs/blogposts.html",
+ "content": "Blog posts The following series of blog posts are based on the development of prox: Part 1 - type level programming Part 2 - Akka Streams with Cats Effect Part 3 - Effect abstraction and ZIO Part 4 - Simplified redesign"
+ } ,
+ {
+ "title": "Custom runners",
+ "url": "/prox/docs/zstream/custom-runners.html",
+ "content": "Customizing the runner The runner is responsible for stating the native processes and wiring all the redirections together. The default implementation is called JVMProcessRunner. There are use cases when providing a custom runner makes sense. One such use case could be to launch external processes within a docker container in case of running on a development machine (for example from tests), while running them directly in production, when the whole service is running within the container. We can implement this scenario by using JVMProcessRunner in production and a custom DockerizedProcessRunner in tests, where we define the latter as follows: import java.nio.file.Path import java.util.UUID case class DockerImage(name: String) case class DockerContainer(name: String) case class DockerProcessInfo[DockerProcessInfo](container: DockerContainer, dockerProcessInfo: DockerProcessInfo) class DockerizedProcessRunner[Info](processRunner: ProcessRunner[Info], mountedDirectory: Path, workingDirectory: Path, image: DockerImage) extends ProcessRunner[DockerProcessInfo[Info]] { override def startProcess[O, E](process: Process[O, E]): ZIO[Any, ProxError, RunningProcess[O, E, DockerProcessInfo[Info]]] = { for { container <- generateContainerName runningProcess <- processRunner .startProcess(wrapInDocker(process, container)) } yield runningProcess.mapInfo(info => DockerProcessInfo(container, info)) } override def startProcessGroup[O, E](processGroup: ProcessGroup[O, E]): ZIO[Any, ProxError, RunningProcessGroup[O, E, DockerProcessInfo[Info]]] = { ZIO.foreach(processGroup.originalProcesses.toVector)(key => generateContainerName.map(c => key -> c)).flatMap { keyAndNames => val nameMap = keyAndNames.toMap val names = keyAndNames.map(_._2) val modifiedProcessGroup = processGroup.map(new ProcessGroup.Mapper[O, E] { def mapFirst[P <: Process[ZStream[Any, ProxError, Byte], E]](process: P): P = wrapInDocker(process, names.head).asInstanceOf[P] def mapInnerWithIdx[P <: 
Process.UnboundIProcess[ZStream[Any, ProxError, Byte], E]](process: P, idx: Int): P = wrapInDocker(process, names(idx)).asInstanceOf[P] def mapLast[P <: Process.UnboundIProcess[O, E]](process: P): P = wrapInDocker(process, names.last).asInstanceOf[P] }) processRunner.startProcessGroup(modifiedProcessGroup) .map(_.mapInfo { case (key, info) => DockerProcessInfo(nameMap(key), info) }) } } private def generateContainerName: ZIO[Any, ProxError, DockerContainer] = ZIO.attempt(DockerContainer(UUID.randomUUID().toString)).mapError(UnknownProxError) private def wrapInDocker[O, E](process: Process[O, E], container: DockerContainer): Process[O, E] = { val envVars = process.environmentVariables.flatMap { case (key, value) => List(\"-e\", s\"$key=$value\") }.toList process.withCommand(\"docker\").withArguments( \"run\" :: \"--name\" :: container.name :: \"-v\" :: mountedDirectory.toString :: \"-w\" :: workingDirectory.toString :: envVars ::: List(image.name, process.command) ::: process.arguments ) } }"
+ } ,
+ {
+ "title": "Custom runners",
+ "url": "/prox/docs/fs2/custom-runners.html",
+ "content": "Customizing the runner The runner is responsible for stating the native processes and wiring all the redirections together. The default implementation is called JVMProcessRunner. There are use cases when providing a custom runner makes sense. One such use case could be to launch external processes within a docker container in case of running on a development machine (for example from tests), while running them directly in production, when the whole service is running within the container. We can implement this scenario by using JVMProcessRunner in production and a custom DockerizedProcessRunner in tests, where we define the latter as follows: import java.nio.file.Path import java.util.UUID case class DockerImage(name: String) case class DockerContainer(name: String) case class DockerProcessInfo[DockerProcessInfo](container: DockerContainer, dockerProcessInfo: DockerProcessInfo) class DockerizedProcessRunner[Info](processRunner: ProcessRunner[Info], mountedDirectory: Path, workingDirectory: Path, image: DockerImage) extends ProcessRunner[DockerProcessInfo[Info]] { override def startProcess[O, E](process: Process[O, E]): IO[RunningProcess[O, E, DockerProcessInfo[Info]]] = { for { container <- generateContainerName runningProcess <- processRunner .startProcess(wrapInDocker(process, container)) } yield runningProcess.mapInfo(info => DockerProcessInfo(container, info)) } override def startProcessGroup[O, E](processGroup: ProcessGroup[O, E]): IO[RunningProcessGroup[O, E, DockerProcessInfo[Info]]] = { Traverse[Vector].sequence(processGroup.originalProcesses.toVector.map(key => generateContainerName.map(c => key -> c))).flatMap { keyAndNames => val nameMap = keyAndNames.toMap val names = keyAndNames.map(_._2) val modifiedProcessGroup = processGroup.map(new ProcessGroup.Mapper[O, E] { def mapFirst[P <: Process[fs2.Stream[IO, Byte], E]](process: P): P = wrapInDocker(process, names.head).asInstanceOf[P] def mapInnerWithIdx[P <: 
Process.UnboundIProcess[fs2.Stream[IO, Byte], E]](process: P, idx: Int): P = wrapInDocker(process, names(idx)).asInstanceOf[P] def mapLast[P <: Process.UnboundIProcess[O, E]](process: P): P = wrapInDocker(process, names.last).asInstanceOf[P] }) processRunner.startProcessGroup(modifiedProcessGroup) .map(_.mapInfo { case (key, info) => DockerProcessInfo(nameMap(key), info) }) } } private def generateContainerName: IO[DockerContainer] = IO(DockerContainer(UUID.randomUUID().toString)) private def wrapInDocker[O, E](process: Process[O, E], container: DockerContainer): Process[O, E] = { val envVars = process.environmentVariables.flatMap { case (key, value) => List(\"-e\", s\"$key=$value\") }.toList process.withCommand(\"docker\").withArguments( \"run\" :: \"--name\" :: container.name :: \"-v\" :: mountedDirectory.toString :: \"-w\" :: workingDirectory.toString :: envVars ::: List(image.name, process.command) ::: process.arguments ) } }"
+ } ,
+ {
+ "title": "Customizing environment",
+ "url": "/prox/docs/zstream/customize.html",
+ "content": "Customizing the environment The type returned by the Process constructor also implements the ProcessConfiguration trait, adding three methods that can be used to customize the working environment of the process to be started: Working directory The in method can be used to customize the working directory: import io.github.vigoo.prox.path._ val dir = home / \"tmp\" // dir: java.nio.file.Path = /home/runner/tmp val proc1 = Process(\"ls\") in dir // proc1: Process.ProcessImpl = ProcessImpl( // command = \"ls\", // arguments = List(), // workingDirectory = Some(value = /home/runner/tmp), // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@68413682, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@21441d72, // inputRedirection = StdIn() // ) Not that dir has the type java.nio.file.Path, and the home / tmp syntax is just a thin syntax extension to produce such values. 
Adding environment variables The with method can be used to add environment variables to the process in the following way: val proc2 = Process(\"echo\", List(\"$TEST\")) `with` (\"TEST\" -> \"Hello world\") // proc2: Process.ProcessImpl = ProcessImpl( // command = \"echo\", // arguments = List(\"$TEST\"), // workingDirectory = None, // environmentVariables = Map(\"TEST\" -> \"Hello world\"), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@3e325af1, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@5bd0336b, // inputRedirection = StdIn() // ) Removing environment variables The subprocess inherits the parent process environment, so it may be necessary to remove some already defined environment variables with the without method: val proc3 = Process(\"echo\" , List(\"$PATH\")) `without` \"PATH\" // proc3: Process.ProcessImpl = ProcessImpl( // command = \"echo\", // arguments = List(\"$PATH\"), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(\"PATH\"), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@3a44ebb5, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@3b6ace02, // inputRedirection = StdIn() // ) Writing reusable functions Because these methods are part of the ProcessConfiguration capability, writing reusable functions require us to define a polymorphic function that requires this capability: import java.nio.file.Path def withHome[P <: ProcessLike with ProcessLikeConfiguration](home: Path, proc: P): P#Self = proc `with` (\"HOME\" -> home.toString) Then we can use it on any kind of process or process group (read about redirection to understand why there are 
multiple concrete process types): val proc4 = Process(\"echo\", List(\"$HOME\")) // proc4: Process.ProcessImpl = ProcessImpl( // command = \"echo\", // arguments = List(\"$HOME\"), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@17a3ee4c, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@4d32dcb8, // inputRedirection = StdIn() // ) val proc5 = withHome(home, proc4) // proc5: Process.ProcessImpl#Self = ProcessImpl( // command = \"echo\", // arguments = List(\"$HOME\"), // workingDirectory = None, // environmentVariables = Map(\"HOME\" -> \"/home/runner\"), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@17a3ee4c, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@4d32dcb8, // inputRedirection = StdIn() // ) val group1 = Process(\"grep\", List(\"ERROR\")) | Process(\"sort\") // group1: ProcessGroup.ProcessGroupImpl = ProcessGroupImpl( // firstProcess = ProcessImplO( // command = \"grep\", // arguments = List(\"ERROR\"), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = OutputStreamThroughPipe( // pipe = io.github.vigoo.prox.ProxZStream$$Lambda$10965/0x00000008031b7040@50368fe8, // runner = io.github.vigoo.prox.SyntaxModule$ProcessPiping$$Lambda$10723/0x00000008026f4840@3dc14132, // chunkSize = 8192 // ), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$ProcessImpl$$Lambda$10724/0x00000008026f7840@35980948, // errorRedirection = StdOut(), // runErrorStream = 
io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@6e94c45f, // inputRedirection = StdIn() // ), // innerProcesses = List(), // lastProcess = ProcessImpl( // command = \"sort\", // arguments = List(), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@a395faa, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@3d46470e, // inputRedirection = StdIn() // ), // originalProcesses = List( // ProcessImpl( // command = \"sort\", // arguments = List(), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@a395faa, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@3d46470e, // ... 
val group2 = withHome(home, group1) // group2: ProcessGroup.ProcessGroupImpl#Self = ProcessGroupImpl( // firstProcess = ProcessImplO( // command = \"grep\", // arguments = List(\"ERROR\"), // workingDirectory = None, // environmentVariables = Map(\"HOME\" -> \"/home/runner\"), // removedEnvironmentVariables = Set(), // outputRedirection = OutputStreamThroughPipe( // pipe = io.github.vigoo.prox.ProxZStream$$Lambda$10965/0x00000008031b7040@50368fe8, // runner = io.github.vigoo.prox.SyntaxModule$ProcessPiping$$Lambda$10723/0x00000008026f4840@3dc14132, // chunkSize = 8192 // ), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$ProcessImpl$$Lambda$10724/0x00000008026f7840@35980948, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@6e94c45f, // inputRedirection = StdIn() // ), // innerProcesses = List(), // lastProcess = ProcessImpl( // command = \"sort\", // arguments = List(), // workingDirectory = None, // environmentVariables = Map(\"HOME\" -> \"/home/runner\"), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@a395faa, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@3d46470e, // inputRedirection = StdIn() // ), // originalProcesses = List( // ProcessImpl( // command = \"sort\", // arguments = List(), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@a395faa, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@3d46470e, // ..."
+ } ,
+ {
+ "title": "Customizing environment",
+ "url": "/prox/docs/fs2/customize.html",
+ "content": "Customizing the environment The type returned by the Process constructor also implements the ProcessConfiguration trait, adding three methods that can be used to customize the working environment of the process to be started: Working directory The in method can be used to customize the working directory: import io.github.vigoo.prox.path._ val dir = home / \"tmp\" // dir: java.nio.file.Path = /home/runner/tmp val proc1 = Process(\"ls\") in dir // proc1: Process.ProcessImpl = ProcessImpl( // command = \"ls\", // arguments = List(), // workingDirectory = Some(value = /home/runner/tmp), // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@2eb4bf64, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@497fbb3f, // inputRedirection = StdIn() // ) Not that dir has the type java.nio.file.Path, and the home / tmp syntax is just a thin syntax extension to produce such values. 
Adding environment variables The with method can be used to add environment variables to the process in the following way: val proc2 = Process(\"echo\", List(\"$TEST\")) `with` (\"TEST\" -> \"Hello world\") // proc2: Process.ProcessImpl = ProcessImpl( // command = \"echo\", // arguments = List(\"$TEST\"), // workingDirectory = None, // environmentVariables = Map(\"TEST\" -> \"Hello world\"), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@5171baaf, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@1d4e323c, // inputRedirection = StdIn() // ) Removing environment variables The subprocess inherits the parent process environment, so it may be necessary to remove some already defined environment variables with the without method: val proc3 = Process(\"echo\" , List(\"$PATH\")) `without` \"PATH\" // proc3: Process.ProcessImpl = ProcessImpl( // command = \"echo\", // arguments = List(\"$PATH\"), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(\"PATH\"), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@7cbb3028, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@3e7b00a1, // inputRedirection = StdIn() // ) Writing reusable functions Because these methods are part of the ProcessConfiguration capability, writing reusable functions require us to define a polymorphic function that requires this capability: import java.nio.file.Path def withHome[P <: ProcessLike with ProcessLikeConfiguration](home: Path, proc: P): P#Self = proc `with` (\"HOME\" -> home.toString) Then we can use it on any kind of process or process group (read about redirection to understand why there are 
multiple concrete process types): val proc4 = Process(\"echo\", List(\"$HOME\")) // proc4: Process.ProcessImpl = ProcessImpl( // command = \"echo\", // arguments = List(\"$HOME\"), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@62037103, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@e3070ad, // inputRedirection = StdIn() // ) val proc5 = withHome(home, proc4) // proc5: Process.ProcessImpl#Self = ProcessImpl( // command = \"echo\", // arguments = List(\"$HOME\"), // workingDirectory = None, // environmentVariables = Map(\"HOME\" -> \"/home/runner\"), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@62037103, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@e3070ad, // inputRedirection = StdIn() // ) val group1 = Process(\"grep\", List(\"ERROR\")) | Process(\"sort\") // group1: ProcessGroup.ProcessGroupImpl = ProcessGroupImpl( // firstProcess = ProcessImplO( // command = \"grep\", // arguments = List(\"ERROR\"), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = OutputStreamThroughPipe( // pipe = io.github.vigoo.prox.ProxFS2$$Lambda$10722/0x00000008026f6840@2baf2b33, // runner = io.github.vigoo.prox.SyntaxModule$ProcessPiping$$Lambda$10723/0x00000008026f4840@4db758ef, // chunkSize = 8192 // ), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$ProcessImpl$$Lambda$10724/0x00000008026f7840@3445b333, // errorRedirection = StdOut(), // runErrorStream = 
io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@39fd180e, // inputRedirection = StdIn() // ), // innerProcesses = List(), // lastProcess = ProcessImpl( // command = \"sort\", // arguments = List(), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@6b595ab1, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@1cde0715, // inputRedirection = StdIn() // ), // originalProcesses = List( // ProcessImpl( // command = \"sort\", // arguments = List(), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@6b595ab1, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@1cde0715, // ... 
val group2 = withHome(home, group1) // group2: ProcessGroup.ProcessGroupImpl#Self = ProcessGroupImpl( // firstProcess = ProcessImplO( // command = \"grep\", // arguments = List(\"ERROR\"), // workingDirectory = None, // environmentVariables = Map(\"HOME\" -> \"/home/runner\"), // removedEnvironmentVariables = Set(), // outputRedirection = OutputStreamThroughPipe( // pipe = io.github.vigoo.prox.ProxFS2$$Lambda$10722/0x00000008026f6840@2baf2b33, // runner = io.github.vigoo.prox.SyntaxModule$ProcessPiping$$Lambda$10723/0x00000008026f4840@4db758ef, // chunkSize = 8192 // ), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$ProcessImpl$$Lambda$10724/0x00000008026f7840@3445b333, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@39fd180e, // inputRedirection = StdIn() // ), // innerProcesses = List(), // lastProcess = ProcessImpl( // command = \"sort\", // arguments = List(), // workingDirectory = None, // environmentVariables = Map(\"HOME\" -> \"/home/runner\"), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@6b595ab1, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@1cde0715, // inputRedirection = StdIn() // ), // originalProcesses = List( // ProcessImpl( // command = \"sort\", // arguments = List(), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@6b595ab1, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@1cde0715, // ..."
+ } ,
+ {
+ "title": "Getting started",
+ "url": "/prox/docs/zstream/",
+ "content": "Getting started with prox First add one of the prox interfaces as a dependency: libraryDependencies += \"io.github.vigoo\" %% \"prox-zstream\" % \"0.7.3\" and import the ZIO specific API from: import io.github.vigoo.prox._ import io.github.vigoo.prox.zstream._ There is also an experimental version for ZIO 2, based on it’s snapshot releases: libraryDependencies += \"io.github.vigoo\" %% \"prox-zstream-2\" % \"0.7.3\" The code snippets in the documentation are based on the ZIO 1 version. Defining a process to run In prox a process to be executed is defined by a pure value which implements the Process[O, E] trait. The type parameters have the following meaning: O is the type of the output value after the system process has finished running E is the type of the error output value after the system process has finished running To create a simple process to be executed use the Process constructor: val proc1 = Process(\"ls\", List(\"-hal\")) // proc1: Process.ProcessImpl = ProcessImpl( // command = \"ls\", // arguments = List(\"-hal\"), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@7e0a87d1, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@7ae455b3, // inputRedirection = StdIn() // ) or we can use the string interpolator: val proc2 = proc\"ls -hal\" // proc2: Process.ProcessImpl = ProcessImpl( // command = \"ls\", // arguments = List(\"-hal\"), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@553fef4a, // errorRedirection = StdOut(), // runErrorStream = 
io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@3db98ccc, // inputRedirection = StdIn() // ) Then we can customize the process execution by for example setting environment variables and working directory and redirect the input, output and error channels of the process pipe two or more processes together still staying on purely specification level. Running the process Once we have our process specification ready, we can start the process with one of the IO functions on process. But for this we first have to have a ProcessRunner implementation in scope. The default one is called JVMProcessRunner and it can be created in the following way: implicit val runner: ProcessRunner[JVMProcessInfo] = new JVMProcessRunner Read the custom process runners page for an example of using a customized runner. With the runner in place we can use several methods to start the process. The simplest one is called run and it blocks the active thread until the process finishes running: proc1.run() // res0: ProxIO[ProcessResult[Unit, Unit]] = OnSuccess( // trace = \"io.github.vigoo.prox.ProxZStream.useResource(ProxZStream.scala:117)\", // first = OnSuccess( // trace = \"io.github.vigoo.prox.ProxZStream.useResource(ProxZStream.scala:117)\", // first = Sync( // trace = \"io.github.vigoo.prox.ProxZStream.useResource(ProxZStream.scala:117)\", // eval = zio.Scope$ReleaseMap$$$Lambda$10957/0x000000080319fc40@1727b0a5 // ), // successK = zio.ZIO$$Lambda$10959/0x000000080319e040@4fb6263f // ), // successK = zio.ZIO$ScopedPartiallyApplied$$$Lambda$10960/0x000000080319d040@6559d53c // ) The result of this IO action is a ProcessResult[O, E], with the ability to observe the exit code and the redirected output and error values. In our first example both O and E were Unit because the default is to redirect output and error to the standard output and standard error streams."
+ } ,
+ {
+ "title": "Getting started",
+ "url": "/prox/docs/fs2/",
+ "content": "Getting started with prox First add one of the prox interfaces as a dependency: libraryDependencies += \"io.github.vigoo\" %% \"prox-fs2\" % \"0.7.3\" or for Cats Effect 3.x / FS2 3.x: libraryDependencies += \"io.github.vigoo\" %% \"prox-fs2-3\" % \"0.7.3\" and, assuming that we have a long living Blocker thread pool defined already, we can create the Prox module: val prox = ProxFS2[IO] // prox: ProxFS2[IO] = io.github.vigoo.prox.ProxFS2$$anon$1@3a0166d9 import prox._ We require F to implement the Concurrent type class, and for that we have to have an implicit context shifter in scope (this should be already available in an application using cats-effect). Defining a process to run In prox a process to be executed is defined by a pure value which implements the Process[O, E] trait. The type parameters have the following meaning: O is the type of the output value after the system process has finished running E is the type of the error output value after the system process has finished running To create a simple process to be executed use the Process constructor: val proc1 = Process(\"ls\", List(\"-hal\")) // proc1: Process.ProcessImpl = ProcessImpl( // command = \"ls\", // arguments = List(\"-hal\"), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@2bbb290f, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@6e7a64eb, // inputRedirection = StdIn() // ) or we can use the string interpolator: val proc2 = proc\"ls -hal\" // proc2: Process.ProcessImpl = ProcessImpl( // command = \"ls\", // arguments = List(\"-hal\"), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = 
io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@6fecc06b, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@5eea44e1, // inputRedirection = StdIn() // ) Then we can customize the process execution by for example setting environment variables and working directory and redirect the input, output and error channels of the process pipe two or more processes together still staying on purely specification level. Running the process Once we have our process specification ready, we can start the process with one of the IO functions on process. But for this we first have to have a ProcessRunner implementation in scope. The default one is called JVMProcessRunner and it can be created in the following way: implicit val runner: ProcessRunner[JVMProcessInfo] = new JVMProcessRunner Read the custom process runners page for an example of using a customized runner. With the runner in place we can use several methods to start the process. The simplest one is called run and it blocks the active thread until the process finishes running: proc1.run() // res0: ProxIO[ProcessResult[Unit, Unit]] = Uncancelable( // body = cats.effect.IO$$$Lambda$10820/0x000000080309a840@2c6bfd62, // event = cats.effect.tracing.TracingEvent$StackTrace // ) The result of this IO action is a ProcessResult[O, E], with the ability to observe the exit code and the redirected output and error values. In our first example both O and E were Unit because the default is to redirect output and error to the standard output and standard error streams."
+ } ,
+ {
+ "title": "Getting started",
+ "url": "/prox/docs/",
+ "content": "Prox has two different interfaces: Cats Effect with FS2 ZIO with ZStream"
+ } ,
+ {
+ "title": "prox: Home",
+ "url": "/prox/",
+ "content": "Prox is a Scala library for running system processes, plugging them to each other and redirecting them to streams."
+ } ,
+ {
+ "title": "Migration",
+ "url": "/prox/docs/migration.html",
+ "content": "Migration from 0.1.x to 0.2 The start method on processes now requires a blockingExecutionContext argument Ignore has been renamed to Drain Log has been renamed to ToVector from 0.2 to 0.4 Process now takes the effect type as parameter, so in case of cats-effect, Process(...) becomes Process[IO](...) The start method on processes now gets a Blocker instead of an execution context from 0.4 to 0.5 0.5 is a complete rewrite of the original library, and the API changed a lot, especially if the process types were used in code to pass around / wrap them. Please refer to the other sections of the documentation to learn how to reimplement them. For simple use cases where constructing and running the processes directly the main differences are: Different operators / methods for different source and target types, see the page about redirection The need of an implicit process runner in scope New ways to start and wait for the process, see the page about runnning processes from 0.5 to 0.6 0.6 introduces the native ZIO/ZStream version of the library. For existing code the following differences apply: Instead of prox, the artifact is now called prox-fs2 Instead of global imports, the FS2 prox module now has to be constructed with the FS2 constructor and the API is imported from that Because the FS2 module captures the F[_] and the Blocker, they are no longer needed to pass on to the API functions and types"
+ } ,
+ {
+ "title": "Process groups",
+ "url": "/prox/docs/zstream/processgroups.html",
+ "content": "Connecting processes together via pipes Connecting one process to another means that the standard output of the first process gets redirected to the standard input of the second process. This is implemented using the redirection capabilities described on the redirection page. The result of connecting one process to another is called a process group and it implements the trait ProcessGroup[O, E]. To create a process group, either: Use the | or via methods between two unbounded processes Use the | or via methods between an unbounded process group and an unbounded process It is important that the process group construction must always happen before any redirection, the type system enforces this by requiring the involved processes to be UnboundedProcess. :bulb: Process.UnboundedProcess is a type alias for a process with all the redirection capabilities Let’s see an example of simply pipeing: val group1 = Process(\"grep\", List(\"ERROR\")) | Process(\"sort\") val group2 = group1 | Process(\"uniq\", List(\"-c\")) A custom pipe (when using via) can be anything of the type ZStream[any, ProxError, Byte] => ZStream[any, ProxError, Byte]). The following not very useful example capitalizes each word coming through: val customPipe: ProxPipe[Byte, Byte] = (s: ZStream[Any, ProxError, Byte]) => s .via(ZPipeline.utf8Decode.mapError(UnknownProxError.apply)) // decode UTF-8 .via(ZPipeline.splitLines) // split to lines .map(_.split(' ').toVector) // split lines to words .map(v => v.map(_.capitalize).mkString(\" \")) .intersperse(\"\\n\") // remerge lines .flatMap(str => ZStream.fromIterable(str.getBytes(StandardCharsets.UTF_8))) // reencode val group3 = Process(\"echo\", List(\"hello world\")).via(customPipe).to(Process(\"wc\", List(\"-w\")))"
+ } ,
+ {
+ "title": "Process groups",
+ "url": "/prox/docs/fs2/processgroups.html",
+ "content": "Connecting processes together via pipes Connecting one process to another means that the standard output of the first process gets redirected to the standard input of the second process. This is implemented using the redirection capabilities described on the redirection page. The result of connecting one process to another is called a process group and it implements the trait ProcessGroup[O, E]. To create a process group, either: Use the | or via methods between two unbounded processes Use the | or via methods between an unbounded process group and an unbounded process It is important that the process group construction must always happen before any redirection, the type system enforces this by requiring the involved processes to be UnboundedProcess. :bulb: Process.UnboundedProcess is a type alias for a process with all the redirection capabilities Let’s see an example of simply piping: val group1 = Process(\"grep\", List(\"ERROR\")) | Process(\"sort\") val group2 = group1 | Process(\"uniq\", List(\"-c\")) A custom pipe (when using via) can be anything of the type Pipe[F, Byte, Byte]. The following not very useful example capitalizes each word coming through: val customPipe: fs2.Pipe[IO, Byte, Byte] = (s: fs2.Stream[IO, Byte]) => s .through(fs2.text.utf8.decode) // decode UTF-8 .through(fs2.text.lines) // split to lines .map(_.split(' ').toVector) // split lines to words .map(v => v.map(_.capitalize).mkString(\" \")) .intersperse(\"\\n\") // remerge lines .through(fs2.text.utf8.encode) // encode as UTF-8 val group3 = Process(\"echo\", List(\"hello world\")).via(customPipe).to(Process(\"wc\", List(\"-w\")))"
+ } ,
+ {
+ "title": "Redirection",
+ "url": "/prox/docs/zstream/redirection.html",
+ "content": "Redirecting input, output and error Similarly to customization, redirection is also implemented with capability traits. The ProcessIO type returned by the Process constructor implements all the three redirection capability traits: RedirectableInput marks that the standard input of the process is not bound yet RedirectableOutput marks that the standard output of the process is not bound yet RedirectableError marks that the standard error output of the process is not bound yet Each of the three channels can be only redirected once. The result type of each redirection method no longer implements the given capability. Let’s see an example of this (redirection methods are described below on this page): import zio._ import zio.stream._ import zio.prelude._ val proc1 = Process(\"echo\", List(\"Hello world\")) // proc1: Process.ProcessImpl = ProcessImpl( // command = \"echo\", // arguments = List(\"Hello world\"), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@3281308b, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@65fd9c21, // inputRedirection = StdIn() // ) val proc2 = proc1 ># ZPipeline.utf8Decode // proc2: Process.ProcessImplO[String] = ProcessImplO( // command = \"echo\", // arguments = List(\"Hello world\"), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = OutputStreamThroughPipe( // pipe = io.github.vigoo.prox.ProxZStream$$Lambda$10979/0x00000008031db840@30088784, // runner = io.github.vigoo.prox.RedirectionModule$RedirectableOutput$$Lambda$10898/0x0000000803107040@202f727d, // chunkSize = 8192 // ), // runOutputStream = 
io.github.vigoo.prox.ProcessModule$Process$ProcessImpl$$Lambda$10724/0x00000008026f7840@1b110b9b, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@65fd9c21, // inputRedirection = StdIn() // ) It is no longer possible to redirect the output of proc2: val proc3 = proc2 >? (ZPipeline.utf8Decode >>> ZPipeline.splitLines) // error: value >? is not a member of io.github.vigoo.prox.zstream.Process.ProcessImplO[String] // did you mean !>?? // val proc3 = proc2 >? (ZPipeline.utf8Decode >>> ZPipeline.splitLines) // ^^^^^^^^ Many redirection methods have an operator version but all of them have alphanumberic variants as well. Input redirection Input redirection is enabled by the RedirectableInput trait. The following operations are supported: operator alternative parameter type what it does < fromFile java.nio.file.Path Natively attach a source file to STDIN < fromStream ZStream[Any, ProxError, Byte] Attach a ZIO byte stream to STDIN !< fromStream ZStream[Any, ProxError, Byte] Attach a ZIO byte stream to STDIN and flush after each chunk Output redirection Output redirection is enabled by the RedirectableOutput trait. The following operations are supported: operator alternative parameter type result type what it does > toFile java.nio.file.Path Unit Natively attach STDOUT to a file >> appendToFile java.nio.file.Path Unit Natively attach STDOUT to a file in append mode > toSink TransformAndSink[Byte, _] Unit Drains the STDOUT through the given sink ># toFoldMonoid [O: Identity](ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]) O Sends STDOUT through the stream and folds the result using its monoid instance >? 
toVector ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]) Vector[O] Sends STDOUT through the stream and collects the results drainOutput ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]) Unit Drains the STDOUT through the given stream foldOutput ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]), R, (R, O) => R R Sends STDOUT through the stream and folds the result using a custom fold function All the variants that accept a stream transformation (ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O])) are also usable by directly passing a ZPipeline. TransformAndSink encapsulates a stream transformation and a unit sink. It is possible to use a sink directly if transformation is not needed. case class TransformAndSink[A, B](transform: ZStream[Any, ProxError, A] => ZStream[Any, ProxError, B], sink: ZSink[Any, ProxError, B, Any, Unit]) Error redirection Error redirection is enabled by the RedirectableError trait. The following operations are supported: operator alternative parameter type result type what it does !> errorToFile java.nio.file.Path Unit Natively attach STDERR to a file !>> appendErrorToFile java.nio.file.Path Unit Natively attach STDERR to a file in append mode !> errorToSink TransformAndSink[Byte, _] Unit Drains the STDERR through the given sink !># errorToFoldMonoid [O: Monoid](ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]) O Sends STDERR through the pipe and folds the result using its monoid instance !>? 
errorToVector ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]) Vector[O] Sends STDERR through the pipe and collects the results drainError ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]) Unit Drains the STDERR through the given pipe foldError ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]), R, (R, O) => R R Sends STDERR through the pipe and folds the result using a custom fold function Redirection for process groups Process groups are two or more processes attached together through pipes. This connection is internally implemented using the above described redirection capabilities. This means that all but the first process has their inputs bound, and all but the last one has their outputs bound. Redirection of input and output for a process group is thus a well defined operation meaning redirection of input of the first process and redirection of output of the last process. For this reason the class created via process piping implements the RedirectableInput and RedirectableOutput traits described above. For the sake of simplicity the library does not support anymore the fully customizable per-process error redirection for process groups, but a reduced but still quite expressive version described by the RedirectableErrors trait. The methods in this trait define error redirection for all process in the group at once: operator alternative parameter type result type what it does !> errorsToSink TransformAndSink[Byte, _] Unit Drains the STDERR through the given sink !># errorsToFoldMonoid [O: Monoid](ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]) O Sends STDERR through the stream and folds the result using its monoid instance !>? 
errorsToVector ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]) Vector[O] Sends STDERR through the stream and collects the results drainErrors ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]) Unit Drains the STDERR through the given stream foldErrors ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]), R, (R, O) => R R Sends STDERR through the stream and folds the result using a custom fold function Redirection to file is not possible through this interface as only a single path could be provided. The result of these redirections is accessible through the ProcessGroupResult interface as it is described in the running processes section. By using the RedirectableErrors.customizedPerProcess interface (having the type RedirectableErrors.CustomizedPerProcess) it is possible to customize the redirection targets per process while keeping their types uniform: operator alternative parameter type result type what it does errorsToFile Process => java.nio.file.Path Unit Natively attach STDERR to a file appendErrorsToFile Process => java.nio.file.Path Unit Natively attach STDERR to a file in append mode errorsToSink Process => TransformAndSink[Byte, _] Unit Drains the STDERR through the given sink errorsToFoldMonoid Process => [O: Monoid](ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]) O Sends STDERR through the stream and folds the result using its monoid instance errorsToVector Process => ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]) Vector[O] Sends STDERR through the stream and collects the results drainErrors Process => ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]) Unit Drains the STDERR through the given stream foldErrors Process => ZStream[Any, ProxError, Byte] => ZStream[Any, ProxError, O]), R, (R, O) => R R Sends STDERR through the stream and folds the result using a custom fold function Let’s see an example of how this works! 
First we define a queue where we want to send error lines from all the involved processes, then we define the two processes separately, connect them with a pipe and customize the error redirection where we prefix the parsed lines based on which process they came from: for { errors <- Queue.unbounded[String] parseLines = (s: ZStream[Any, ProxError, Byte]) => s.via(ZPipeline.utf8Decode.mapError(UnknownProxError.apply) >>> ZPipeline.splitLines) p1 = Process(\"proc1\") p2 = Process(\"proc2\") group = (p1 | p2).customizedPerProcess.errorsToSink { case p if p == p1 => TransformAndSink(parseLines.andThen(_.map(s => \"P1: \" + s)), ZSink.foreach(errors.offer)) case p if p == p2 => TransformAndSink(parseLines.andThen(_.map(s => \"P2: \" + s)), ZSink.foreach(errors.offer)) } } yield () Creating reusable functions The Process object contains several useful type aliases for writing functions that work with any process by only specifying what redirection channels we want unbounded. The UnboundProcess represents a process which is fully unbound, no redirection has been done yet. It is defined as follows: type UnboundProcess = Process[Unit, Unit] with RedirectableInput[UnboundOEProcess] with RedirectableOutput[UnboundIEProcess[*]] with RedirectableError[UnboundIOProcess[*]] where UnboundIOProcess[E] for example represents a process which has its error output already bound. 
These type aliases can be used to define functions performing redirection on arbitrary processes, for example: def logErrors[P <: Process.UnboundEProcess[_]](proc: P) = { val target = TransformAndSink( ZPipeline.utf8Decode.mapError(UnknownProxError.apply) >>> ZPipeline.splitLines, ZSink.foreach((line: String) => ZIO.debug(line))) proc !> target } val proc4 = logErrors(Process(\"something\")) // proc4: Process[_, Unit] = ProcessImplE( // command = \"something\", // arguments = List(), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@1fdd241b, // errorRedirection = OutputStreamToSink( // sink = TransformAndSink( // transform = io.github.vigoo.prox.ProxZStream$TransformAndSink$$$Lambda$10991/0x0000000803237040@440a1cdc, // sink = zio.stream.ZSink@6e1f2307 // ), // chunkSize = 8192 // ), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$ProcessImpl$$Lambda$10902/0x0000000803103840@1c3ed5df, // inputRedirection = StdIn() // )"
+ } ,
+ {
+ "title": "Redirection",
+ "url": "/prox/docs/fs2/redirection.html",
+ "content": "Redirecting input, output and error Similarly to customization, redirection is also implemented with capability traits. The ProcessIO type returned by the Process constructor implements all the three redirection capability traits: RedirectableInput marks that the standard input of the process is not bound yet RedirectableOutput marks that the standard output of the process is not bound yet RedirectableError marks that the standard error output of the process is not bound yet Each of the three channels can be only redirected once. The result type of each redirection method no longer implements the given capability. Let’s see an example of this (redirection methods are described below on this page): import cats.implicits._ val proc1 = Process(\"echo\", List(\"Hello world\")) // proc1: Process.ProcessImpl = ProcessImpl( // command = \"echo\", // arguments = List(\"Hello world\"), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@3c441db9, // errorRedirection = StdOut(), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@646dcd02, // inputRedirection = StdIn() // ) val proc2 = proc1 ># fs2.text.utf8.decode // proc2: Process.ProcessImplO[String] = ProcessImplO( // command = \"echo\", // arguments = List(\"Hello world\"), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = OutputStreamThroughPipe( // pipe = fs2.text$utf8$$$Lambda$10896/0x0000000803101040@2ef6a566, // runner = io.github.vigoo.prox.RedirectionModule$RedirectableOutput$$Lambda$10898/0x0000000803107040@13e21eb2, // chunkSize = 8192 // ), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$ProcessImpl$$Lambda$10724/0x00000008026f7840@f5760a2, // errorRedirection = StdOut(), // 
runErrorStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10721/0x0000000802701040@646dcd02, // inputRedirection = StdIn() // ) It is no longer possible to redirect the output of proc2: val proc3 = proc2 >? fs2.text.utf8.decode[IO].andThen(fs2.text.lines) // error: value >? is not a member of repl.MdocSession.MdocApp.prox.Process.ProcessImplO[String] // did you mean !>?? // val proc3 = proc2 >? fs2.text.utf8.decode[IO].andThen(fs2.text.lines) // ^^^^^^^^ Many redirection methods have an operator version but all of them have alphanumberic variants as well. Input redirection Input redirection is enabled by the RedirectableInput trait. The following operations are supported: operator alternative parameter type what it does < fromFile java.nio.file.Path Natively attach a source file to STDIN < fromStream Stream[F, Byte] Attach an fs2 byte stream to STDIN !< fromStream Stream[F, Byte] Attach an fs2 byte stream to STDIN and flush after each chunk Output redirection Output redirection is enabled by the RedirectableOutput trait. The following operations are supported: operator alternative parameter type result type what it does > toFile java.nio.file.Path Unit Natively attach STDOUT to a file >> appendToFile java.nio.file.Path Unit Natively attach STDOUT to a file in append mode > toSink Pipe[F, Byte, Unit] Unit Drains the STDOUT through the given pipe ># toFoldMonoid [O: Monoid](Pipe[F, Byte, O] O Sends STDOUT through the pipe and folds the result using its monoid instance >? toVector Pipe[F, Byte, O] Vector[O] Sends STDOUT through the pipe and collects the results drainOutput Pipe[F, Byte, O] Unit Drains the STDOUT through the given pipe foldOutput Pipe[F, Byte, O], R, (R, O) => R R Sends STDOUT through the pipe and folds the result using a custom fold function Error redirection Error redirection is enabled by the RedirectableError trait. 
The following operations are supported: operator alternative parameter type result type what it does !> errorToFile java.nio.file.Path Unit Natively attach STDERR to a file !>> appendErrorToFile java.nio.file.Path Unit Natively attach STDERR to a file in append mode !> errorToSink Pipe[F, Byte, Unit] Unit Drains the STDERR through the given pipe !># errorToFoldMonoid [O: Monoid](Pipe[F, Byte, O] O Sends STDERR through the pipe and folds the result using its monoid instance !>? errorToVector Pipe[F, Byte, O] Vector[O] Sends STDERR through the pipe and collects the results drainError Pipe[F, Byte, O] Unit Drains the STDERR through the given pipe foldError Pipe[F, Byte, O], R, (R, O) => R R Sends STDERR through the pipe and folds the result using a custom fold function Redirection for process groups Process groups are two or more processes attached together through pipes. This connection is internally implemented using the above described redirection capabilities. This means that all but the first process has their inputs bound, and all but the last one has their outputs bound. Redirection of input and output for a process group is thus a well defined operation meaning redirection of input of the first process and redirection of output of the last process. For this reason the class created via process piping implements the RedirectableInput and RedirectableOutput traits described above. For the sake of simplicity the library does not support anymore the fully customizable per-process error redirection for process groups, but a reduced but still quite expressive version described by the RedirectableErrors trait. 
The methods in this trait define error redirection for all process in the group at once: operator alternative parameter type result type what it does !> errorsToSink Pipe[F, Byte, Unit] Unit Drains the STDERR through the given pipe !># errorsToFoldMonoid [O: Monoid](Pipe[F, Byte, O] O Sends STDERR through the pipe and folds the result using its monoid instance !>? errorsToVector Pipe[F, Byte, O] Vector[O] Sends STDERR through the pipe and collects the results drainErrors Pipe[F, Byte, O] Unit Drains the STDERR through the given pipe foldErrors Pipe[F, Byte, O], R, (R, O) => R R Sends STDERR through the pipe and folds the result using a custom fold function Redirection to file is not possible through this interface as only a single path could be provided. The result of these redirections is accessible through the ProcessGroupResult interface as it is described in the running processes section. By using the RedirectableErrors.customizedPerProcess interface (having the type RedirectableErrors.CustomizedPerProcess) it is possible to customize the redirection targets per process while keeping their types uniform: operator alternative parameter type result type what it does errorsToFile Process => java.nio.file.Path Unit Natively attach STDERR to a file appendErrorsToFile Process => java.nio.file.Path Unit Natively attach STDERR to a file in append mode errorsToSink Process => Pipe[F, Byte, Unit] Unit Drains the STDERR through the given pipe errorsToFoldMonoid Process => [O: Monoid](Pipe[F, Byte, O] O Sends STDERR through the pipe and folds the result using its monoid instance errorsToVector Process => Pipe[F, Byte, O] Vector[O] Sends STDERR through the pipe and collects the results drainErrors Process => Pipe[F, Byte, O] Unit Drains the STDERR through the given pipe foldErrors Process => Pipe[F, Byte, O], R, (R, O) => R R Sends STDERR through the pipe and folds the result using a custom fold function Let’s see an example of how this works! 
First we define a queue where we want to send error lines from all the involved processes, then we define the two processes separately, connect them with a pipe and customize the error redirection where we prefix the parsed lines based on which process they came from: import cats.effect.std.Queue for { errors <- Queue.unbounded[IO, String] parseLines = fs2.text.utf8.decode[IO].andThen(fs2.text.lines) p1 = Process(\"proc1\") p2 = Process(\"proc2\") group = (p1 | p2).customizedPerProcess.errorsToSink { case p if p == p1 => parseLines.andThen(_.map(s => \"P1: \" + s)).andThen(_.evalMap(errors.offer)) case p if p == p2 => parseLines.andThen(_.map(s => \"P2: \" + s)).andThen(_.evalMap(errors.offer)) } } yield () Creating reusable functions The Process object contains several useful type aliases for writing functions that work with any process by only specifying what redirection channels we want unbounded. The UnboundProcess represents a process which is fully unbound, no redirection has been done yet. It is defined as follows: type UnboundProcess = Process[Unit, Unit] with RedirectableInput[UnboundOEProcess] with RedirectableOutput[UnboundIEProcess[*]] with RedirectableError[UnboundIOProcess[*]] where UnboundIOProcess[E] for example represents a process which has its error output already bound. 
These type aliases can be used to define functions performing redirection on arbitrary processes, for example: def logErrors[P <: Process.UnboundEProcess[_]](proc: P) = { val target = fs2.text.utf8.decode[IO].andThen(fs2.text.lines).andThen(_.evalMap(line => IO(println(line)))) proc !> target } val proc4 = logErrors(Process(\"something\")) // proc4: Process[_, Unit] = ProcessImplE( // command = \"something\", // arguments = List(), // workingDirectory = None, // environmentVariables = Map(), // removedEnvironmentVariables = Set(), // outputRedirection = StdOut(), // runOutputStream = io.github.vigoo.prox.ProcessModule$Process$$$Lambda$10720/0x00000008026f7040@465296ac, // errorRedirection = OutputStreamToSink( // sink = scala.Function1$$Lambda$10901/0x0000000803105040@4b7024b1, // chunkSize = 8192 // ), // runErrorStream = io.github.vigoo.prox.ProcessModule$Process$ProcessImpl$$Lambda$10902/0x0000000803103840@effd624, // inputRedirection = StdIn() // )"
+ } ,
+ {
+ "title": "Running processes",
+ "url": "/prox/docs/zstream/running.html",
+ "content": "Running processes and process groups There are three methods for running a process: The run method is the simplest one, it starts the process and then blocks the current fiber until it terminates The start method starts the process and returns a fiber packed into a resource. The fiber finishes when the process terminates. Canceling the fiber terminates the process. The startProcess method returns a RunningProcess[O, E] interface that allows advanced some operations Similarly for a process group, there is a run, a start and a startProcessGroup method but with different result types. Let’s see some examples! implicit val runner: ProcessRunner[JVMProcessInfo] = new JVMProcessRunner val process = Process(\"echo\", List(\"hello\")) val result1 = process.run() val result2 = ZIO.scoped { process.start().flatMap { fiber => fiber.join } } val result3 = for { runningProcess <- process.startProcess() _ <- runningProcess.kill() } yield () Both RunningProcess and RunningProcessGroup has the following methods: waitForExit() waits until the process terminates terminate() sends SIGTERM to the process kill() sends SIGKILL to the process In addition RunningProcess also defines an isAlive check. Process execution result The result of a process is represented by ProcessResult[O, E] defined as follows: trait ProcessResult[+O, +E] { val exitCode: ExitCode val output: O val error: E } The type and value of output and error depends on what redirection was defined on the process. Process group execution result The result of a process group is represented by ProcessGroupResult[O, E]: trait ProcessGroupResult[+O, +E] { val exitCodes: Map[Process[Unit, Unit], ExitCode] val output: O val errors: Map[Process[Unit, Unit], E] } The keys of the maps are the original process values used in the piping operations."
+ } ,
+ {
+ "title": "Running processes",
+ "url": "/prox/docs/fs2/running.html",
+ "content": "Running processes and process groups There are three methods for running a process: The run method is the simplest one, it starts the process and then blocks the current fiber until it terminates The start method starts the process and returns a fiber packed into a resource. The fiber finishes when the process terminates. Canceling the fiber terminates the process. The startProcess method returns a RunningProcess[O, E] interface that allows advanced some operations Similarly for a process group, there is a run, a start and a startProcessGroup method but with different result types. Let’s see some examples! implicit val runner: ProcessRunner[JVMProcessInfo] = new JVMProcessRunner val process = Process(\"echo\", List(\"hello\")) val result1 = process.run() val result2 = process.start().flatMap { fiber => fiber.join } val result3 = for { runningProcess <- process.startProcess() _ <- runningProcess.kill() } yield () Both RunningProcess and RunningProcessGroup has the following methods: waitForExit() waits until the process terminates terminate() sends SIGTERM to the process kill() sends SIGKILL to the process In addition RunningProcess also defines an isAlive check. Process execution result The result of a process is represented by ProcessResult[O, E] defined as follows: trait ProcessResult[+O, +E] { val exitCode: ExitCode val output: O val error: E } The type and value of output and error depends on what redirection was defined on the process. Process group execution result The result of a process group is represented by ProcessGroupResult[O, E]: trait ProcessGroupResult[+O, +E] { val exitCodes: Map[Process[Unit, Unit], ExitCode] val output: O val errors: Map[Process[Unit, Unit], E] } The keys of the maps are the original process values used in the piping operations."
+ } ,
+ ];
+
+ idx = lunr(function () {
+ this.ref("title");
+ this.field("content");
+
+ docs.forEach(function (doc) {
+ this.add(doc);
+ }, this);
+ });
+
+ docs.forEach(function (doc) {
+ docMap.set(doc.title, doc.url);
+ });
+}
+
+// Keydown handler for search: arrow keys navigate results, ESC closes the dropdown
+function searchOnKeyDown(e) {
+ const keyCode = e.keyCode; // legacy keyCode API: 40=down, 38=up, 27=esc
+ const parent = e.target.parentElement;
+ const isSearchBar = e.target.id === "search-bar";
+ const isSearchResult = parent ? parent.id.startsWith("result-") : false; // result items have ids "result-<n>"
+ const isSearchBarOrResult = isSearchBar || isSearchResult;
+
+ if (keyCode === 40 && isSearchBarOrResult) {
+ // On 'down', try to navigate down the search results
+ e.preventDefault();
+ e.stopPropagation();
+ selectDown(e);
+ } else if (keyCode === 38 && isSearchBarOrResult) {
+ // On 'up', try to navigate up the search results
+ e.preventDefault();
+ e.stopPropagation();
+ selectUp(e);
+ } else if (keyCode === 27 && isSearchBarOrResult) {
+ // On 'ESC', close the search dropdown
+ e.preventDefault();
+ e.stopPropagation();
+ closeDropdownSearch(e);
+ }
+}
+
+// Search is only done on key-up so that the search terms are properly propagated
+function searchOnKeyUp(e) {
+ // Filter out up, down, esc keys — those are navigation keys handled by searchOnKeyDown
+ const keyCode = e.keyCode;
+ const cannotBe = [40, 38, 27]; // down, up, escape
+ const isSearchBar = e.target.id === "search-bar";
+ const keyIsNotWrong = !cannotBe.includes(keyCode);
+ if (isSearchBar && keyIsNotWrong) {
+ // Try to run a search
+ runSearch(e);
+ }
+}
+
+// Move the cursor up the search list
+function selectUp(e) {
+ if (e.target.parentElement.id.startsWith("result-")) {
+ const index = parseInt(e.target.parentElement.id.substring(7));
+ if (!isNaN(index) && (index > 0)) {
+ const nextIndexStr = "result-" + (index - 1);
+ const querySel = "li[id$='" + nextIndexStr + "'";
+ const nextResult = document.querySelector(querySel);
+ if (nextResult) {
+ nextResult.firstChild.focus();
+ }
+ }
+ }
+}
+
+// Move the cursor down the search list
+function selectDown(e) {
+ if (e.target.id === "search-bar") {
+ const firstResult = document.querySelector("li[id$='result-0']");
+ if (firstResult) {
+ firstResult.firstChild.focus();
+ }
+ } else if (e.target.parentElement.id.startsWith("result-")) {
+ const index = parseInt(e.target.parentElement.id.substring(7));
+ if (!isNaN(index)) {
+ const nextIndexStr = "result-" + (index + 1);
+ const querySel = "li[id$='" + nextIndexStr + "'";
+ const nextResult = document.querySelector(querySel);
+ if (nextResult) {
+ nextResult.firstChild.focus();
+ }
+ }
+ }
+}
+
+// Search for whatever the user has typed so far
+function runSearch(e) {
+ if (e.target.value === "") {
+ // On empty string, remove all search results
+ // Otherwise this may show all results as everything is a "match"
+ applySearchResults([]);
+ } else {
+ const tokens = e.target.value.split(" ");
+ const moddedTokens = tokens.map(function (token) {
+ // "*" + token + "*"  (wildcard wrapping intentionally disabled; tokens pass through unchanged)
+ return token;
+ })
+ const searchTerm = moddedTokens.join(" ");
+ const searchResults = idx.search(searchTerm); // idx: lunr index built earlier in this file
+ const mapResults = searchResults.map(function (result) {
+ const resultUrl = docMap.get(result.ref); // result.ref is the doc title (the index uses "title" as its ref)
+ return { name: result.ref, url: resultUrl };
+ })
+
+ applySearchResults(mapResults);
+ }
+
+}
+
+// After a search, modify the search dropdown to contain the search results
+function applySearchResults(results) {
+ const dropdown = document.querySelector("div[id$='search-dropdown'] > .dropdown-content.show"); // only touch an already-open dropdown
+ if (dropdown) {
+ //Remove each child (clear previous results)
+ while (dropdown.firstChild) {
+ dropdown.removeChild(dropdown.firstChild);
+ }
+
+ //Add each result as an element in the list
+ results.forEach(function (result, i) {
+ const elem = document.createElement("li");
+ elem.setAttribute("class", "dropdown-item");
+ elem.setAttribute("id", "result-" + i); // id consumed by selectUp/selectDown navigation
+
+ const elemLink = document.createElement("a");
+ elemLink.setAttribute("title", result.name);
+ elemLink.setAttribute("href", result.url);
+ elemLink.setAttribute("class", "dropdown-item-link");
+
+ const elemLinkText = document.createElement("span");
+ elemLinkText.setAttribute("class", "dropdown-item-link-text");
+ elemLinkText.innerHTML = result.name; // NOTE(review): innerHTML with an index title; titles are site-generated, but textContent would be safer — confirm
+
+ elemLink.appendChild(elemLinkText);
+ elem.appendChild(elemLink);
+ dropdown.appendChild(elem);
+ });
+ }
+}
+
+// Close the dropdown if the user clicks (only) outside of it
+function closeDropdownSearch(e) {
+ // Check if where we're clicking is the search dropdown
+ if (e.target.id !== "search-bar") {
+ const dropdown = document.querySelector("div[id$='search-dropdown'] > .dropdown-content.show");
+ if (dropdown) {
+ dropdown.classList.remove("show");
+ document.documentElement.removeEventListener("click", closeDropdownSearch); // listener registered when the dropdown opened
+ }
+ }
+}
diff --git a/js/version-selector.js b/js/version-selector.js
new file mode 100644
index 00000000..0c3e5855
--- /dev/null
+++ b/js/version-selector.js
@@ -0,0 +1,36 @@
+/* When the user clicks on the navigation Documentation button,
+ * toggle between hiding and showing the dropdown content.
+ */
+function displayToggleVersion(e) {
+ e.preventDefault();
+ e.stopPropagation();
+ // Calling close func. in case we're clicking another dropdown with one opened
+ closeDropdownVersion(e);
+ const parent = e.target.closest("div[id$='version-dropdown']");
+ if (parent) {
+ const dropdown = parent.querySelector("#version-dropdown-content");
+ if (dropdown) {
+ dropdown.classList.toggle("show");
+ if (dropdown.classList.contains("show")) {
+ document.documentElement.addEventListener("click", closeDropdownVersion);
+ }
+ else {
+ document.documentElement.removeEventListener("click", closeDropdownVersion);
+ }
+ }
+ }
+}
+
+// Close the dropdown if the user clicks (only) outside of it
+function closeDropdownVersion(e) {
+ const dropdown = document.querySelector("div[id$='version-dropdown'] > .dropdown-content.show");
+ if (dropdown) {
+ const currentTarget = e.currentTarget || {};
+ const currentTargetParent = currentTarget.closest("div[id$='version-dropdown']");
+ const dropdownParent = dropdown.closest("div[id$='version-dropdown']");
+ if (currentTargetParent !== dropdownParent) {
+ dropdown.classList.remove("show");
+ }
+ document.documentElement.removeEventListener("click", closeDropdownVersion);
+ }
+}
diff --git a/lunr/LICENSE b/lunr/LICENSE
new file mode 100644
index 00000000..8a12a69b
--- /dev/null
+++ b/lunr/LICENSE
@@ -0,0 +1,20 @@
+
+Copyright (C) 2013 by Oliver Nightingale
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/lunr/lunr.js b/lunr/lunr.js
new file mode 100644
index 00000000..fc329593
--- /dev/null
+++ b/lunr/lunr.js
@@ -0,0 +1,3475 @@
+/**
+ * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.3.9
+ * Copyright (C) 2020 Oliver Nightingale
+ * @license MIT
+ */
+
+; (function () {
+
+ /**
+ * A convenience function for configuring and constructing
+ * a new lunr Index.
+ *
+ * A lunr.Builder instance is created and the pipeline setup
+ * with a trimmer, stop word filter and stemmer.
+ *
+ * This builder object is yielded to the configuration function
+ * that is passed as a parameter, allowing the list of fields
+ * and other builder parameters to be customised.
+ *
+ * All documents _must_ be added within the passed config function.
+ *
+ * @example
+ * var idx = lunr(function () {
+ * this.field('title')
+ * this.field('body')
+ * this.ref('id')
+ *
+ * documents.forEach(function (doc) {
+ * this.add(doc)
+ * }, this)
+ * })
+ *
+ * @see {@link lunr.Builder}
+ * @see {@link lunr.Pipeline}
+ * @see {@link lunr.trimmer}
+ * @see {@link lunr.stopWordFilter}
+ * @see {@link lunr.stemmer}
+ * @namespace {function} lunr
+ */
+ var lunr = function (config) {
+ var builder = new lunr.Builder
+
+ builder.pipeline.add(
+ lunr.trimmer,
+ lunr.stopWordFilter,
+ lunr.stemmer
+ )
+
+ builder.searchPipeline.add(
+ lunr.stemmer
+ )
+
+ config.call(builder, builder)
+ return builder.build()
+ }
+
+ lunr.version = "2.3.9"
+ /*!
+ * lunr.utils
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+ /**
+ * A namespace containing utils for the rest of the lunr library
+ * @namespace lunr.utils
+ */
+ lunr.utils = {}
+
+ /**
+ * Print a warning message to the console.
+ *
+ * @param {String} message The message to be printed.
+ * @memberOf lunr.utils
+ * @function
+ */
+ lunr.utils.warn = (function (global) {
+ /* eslint-disable no-console */
+ return function (message) {
+ if (global.console && console.warn) {
+ console.warn(message)
+ }
+ }
+ /* eslint-enable no-console */
+ })(this)
+
+ /**
+ * Convert an object to a string.
+ *
+ * In the case of `null` and `undefined` the function returns
+ * the empty string, in all other cases the result of calling
+ * `toString` on the passed object is returned.
+ *
+ * @param {Any} obj The object to convert to a string.
+ * @return {String} string representation of the passed object.
+ * @memberOf lunr.utils
+ */
+ lunr.utils.asString = function (obj) {
+ if (obj === void 0 || obj === null) {
+ return ""
+ } else {
+ return obj.toString()
+ }
+ }
+
+ /**
+ * Clones an object.
+ *
+ * Will create a copy of an existing object such that any mutations
+ * on the copy cannot affect the original.
+ *
+ * Only shallow objects are supported, passing a nested object to this
+ * function will cause a TypeError.
+ *
+ * Objects with primitives, and arrays of primitives are supported.
+ *
+ * @param {Object} obj The object to clone.
+ * @return {Object} a clone of the passed object.
+ * @throws {TypeError} when a nested object is passed.
+ * @memberOf Utils
+ */
+ lunr.utils.clone = function (obj) {
+ if (obj === null || obj === undefined) {
+ return obj
+ }
+
+ var clone = Object.create(null),
+ keys = Object.keys(obj)
+
+ for (var i = 0; i < keys.length; i++) {
+ var key = keys[i],
+ val = obj[key]
+
+ if (Array.isArray(val)) {
+ clone[key] = val.slice()
+ continue
+ }
+
+ if (typeof val === 'string' ||
+ typeof val === 'number' ||
+ typeof val === 'boolean') {
+ clone[key] = val
+ continue
+ }
+
+ throw new TypeError("clone is not deep and does not support nested objects")
+ }
+
+ return clone
+ }
+ lunr.FieldRef = function (docRef, fieldName, stringValue) {
+ this.docRef = docRef
+ this.fieldName = fieldName
+ this._stringValue = stringValue
+ }
+
+ lunr.FieldRef.joiner = "/"
+
+ lunr.FieldRef.fromString = function (s) {
+ var n = s.indexOf(lunr.FieldRef.joiner)
+
+ if (n === -1) {
+ throw "malformed field ref string"
+ }
+
+ var fieldRef = s.slice(0, n),
+ docRef = s.slice(n + 1)
+
+ return new lunr.FieldRef(docRef, fieldRef, s)
+ }
+
+ lunr.FieldRef.prototype.toString = function () {
+ if (this._stringValue == undefined) {
+ this._stringValue = this.fieldName + lunr.FieldRef.joiner + this.docRef
+ }
+
+ return this._stringValue
+ }
+ /*!
+ * lunr.Set
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+ /**
+ * A lunr set.
+ *
+ * @constructor
+ */
+ lunr.Set = function (elements) {
+ this.elements = Object.create(null)
+
+ if (elements) {
+ this.length = elements.length
+
+ for (var i = 0; i < this.length; i++) {
+ this.elements[elements[i]] = true
+ }
+ } else {
+ this.length = 0
+ }
+ }
+
+ /**
+ * A complete set that contains all elements.
+ *
+ * @static
+ * @readonly
+ * @type {lunr.Set}
+ */
+ lunr.Set.complete = {
+ intersect: function (other) {
+ return other
+ },
+
+ union: function () {
+ return this
+ },
+
+ contains: function () {
+ return true
+ }
+ }
+
+ /**
+ * An empty set that contains no elements.
+ *
+ * @static
+ * @readonly
+ * @type {lunr.Set}
+ */
+ lunr.Set.empty = {
+ intersect: function () {
+ return this
+ },
+
+ union: function (other) {
+ return other
+ },
+
+ contains: function () {
+ return false
+ }
+ }
+
+ /**
+ * Returns true if this set contains the specified object.
+ *
+ * @param {object} object - Object whose presence in this set is to be tested.
+ * @returns {boolean} - True if this set contains the specified object.
+ */
+ lunr.Set.prototype.contains = function (object) {
+ return !!this.elements[object]
+ }
+
+ /**
+ * Returns a new set containing only the elements that are present in both
+ * this set and the specified set.
+ *
+ * @param {lunr.Set} other - set to intersect with this set.
+ * @returns {lunr.Set} a new set that is the intersection of this and the specified set.
+ */
+
+ lunr.Set.prototype.intersect = function (other) {
+ var a, b, elements, intersection = []
+
+ if (other === lunr.Set.complete) {
+ return this
+ }
+
+ if (other === lunr.Set.empty) {
+ return other
+ }
+
+ if (this.length < other.length) {
+ a = this
+ b = other
+ } else {
+ a = other
+ b = this
+ }
+
+ elements = Object.keys(a.elements)
+
+ for (var i = 0; i < elements.length; i++) {
+ var element = elements[i]
+ if (element in b.elements) {
+ intersection.push(element)
+ }
+ }
+
+ return new lunr.Set(intersection)
+ }
+
+ /**
+ * Returns a new set combining the elements of this and the specified set.
+ *
+ * @param {lunr.Set} other - set to union with this set.
+ * @return {lunr.Set} a new set that is the union of this and the specified set.
+ */
+
+ lunr.Set.prototype.union = function (other) {
+ if (other === lunr.Set.complete) {
+ return lunr.Set.complete
+ }
+
+ if (other === lunr.Set.empty) {
+ return this
+ }
+
+ return new lunr.Set(Object.keys(this.elements).concat(Object.keys(other.elements)))
+ }
+ /**
+ * A function to calculate the inverse document frequency for
+ * a posting. This is shared between the builder and the index
+ *
+ * @private
+ * @param {object} posting - The posting for a given term
+ * @param {number} documentCount - The total number of documents.
+ */
+ lunr.idf = function (posting, documentCount) {
+ var documentsWithTerm = 0
+
+ for (var fieldName in posting) {
+ if (fieldName == '_index') continue // Ignore the term index, its not a field
+ documentsWithTerm += Object.keys(posting[fieldName]).length
+ }
+
+ var x = (documentCount - documentsWithTerm + 0.5) / (documentsWithTerm + 0.5)
+
+ return Math.log(1 + Math.abs(x))
+ }
+
+ /**
+ * A token wraps a string representation of a token
+ * as it is passed through the text processing pipeline.
+ *
+ * @constructor
+ * @param {string} [str=''] - The string token being wrapped.
+ * @param {object} [metadata={}] - Metadata associated with this token.
+ */
+ lunr.Token = function (str, metadata) {
+ this.str = str || ""
+ this.metadata = metadata || {}
+ }
+
+ /**
+ * Returns the token string that is being wrapped by this object.
+ *
+ * @returns {string}
+ */
+ lunr.Token.prototype.toString = function () {
+ return this.str
+ }
+
+ /**
+ * A token update function is used when updating or optionally
+ * when cloning a token.
+ *
+ * @callback lunr.Token~updateFunction
+ * @param {string} str - The string representation of the token.
+ * @param {Object} metadata - All metadata associated with this token.
+ */
+
+ /**
+ * Applies the given function to the wrapped string token.
+ *
+ * @example
+ * token.update(function (str, metadata) {
+ * return str.toUpperCase()
+ * })
+ *
+ * @param {lunr.Token~updateFunction} fn - A function to apply to the token string.
+ * @returns {lunr.Token}
+ */
+ lunr.Token.prototype.update = function (fn) {
+ this.str = fn(this.str, this.metadata)
+ return this
+ }
+
+ /**
+ * Creates a clone of this token. Optionally a function can be
+ * applied to the cloned token.
+ *
+ * @param {lunr.Token~updateFunction} [fn] - An optional function to apply to the cloned token.
+ * @returns {lunr.Token}
+ */
+ lunr.Token.prototype.clone = function (fn) {
+ fn = fn || function (s) { return s }
+ return new lunr.Token(fn(this.str, this.metadata), this.metadata)
+ }
+ /*!
+ * lunr.tokenizer
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+ /**
+ * A function for splitting a string into tokens ready to be inserted into
+ * the search index. Uses `lunr.tokenizer.separator` to split strings, change
+ * the value of this property to change how strings are split into tokens.
+ *
+ * This tokenizer will convert its parameter to a string by calling `toString` and
+ * then will split this string on the character in `lunr.tokenizer.separator`.
+ * Arrays will have their elements converted to strings and wrapped in a lunr.Token.
+ *
+ * Optional metadata can be passed to the tokenizer, this metadata will be cloned and
+ * added as metadata to every token that is created from the object to be tokenized.
+ *
+ * @static
+ * @param {?(string|object|object[])} obj - The object to convert into tokens
+ * @param {?object} metadata - Optional metadata to associate with every token
+ * @returns {lunr.Token[]}
+ * @see {@link lunr.Pipeline}
+ */
+ lunr.tokenizer = function (obj, metadata) {
+ if (obj == null || obj == undefined) {
+ return []
+ }
+
+ if (Array.isArray(obj)) {
+ return obj.map(function (t) {
+ return new lunr.Token(
+ lunr.utils.asString(t).toLowerCase(),
+ lunr.utils.clone(metadata)
+ )
+ })
+ }
+
+ var str = obj.toString().toLowerCase(),
+ len = str.length,
+ tokens = []
+
+ for (var sliceEnd = 0, sliceStart = 0; sliceEnd <= len; sliceEnd++) {
+ var char = str.charAt(sliceEnd),
+ sliceLength = sliceEnd - sliceStart
+
+ if ((char.match(lunr.tokenizer.separator) || sliceEnd == len)) {
+
+ if (sliceLength > 0) {
+ var tokenMetadata = lunr.utils.clone(metadata) || {}
+ tokenMetadata["position"] = [sliceStart, sliceLength]
+ tokenMetadata["index"] = tokens.length
+
+ tokens.push(
+ new lunr.Token(
+ str.slice(sliceStart, sliceEnd),
+ tokenMetadata
+ )
+ )
+ }
+
+ sliceStart = sliceEnd + 1
+ }
+
+ }
+
+ return tokens
+ }
+
+ /**
+ * The separator used to split a string into tokens. Override this property to change the behaviour of
+ * `lunr.tokenizer` behaviour when tokenizing strings. By default this splits on whitespace and hyphens.
+ *
+ * @static
+ * @see lunr.tokenizer
+ */
+ lunr.tokenizer.separator = /[\s\-]+/
+ /*!
+ * lunr.Pipeline
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+ /**
+ * lunr.Pipelines maintain an ordered list of functions to be applied to all
+ * tokens in documents entering the search index and queries being ran against
+ * the index.
+ *
+ * An instance of lunr.Index created with the lunr shortcut will contain a
+ * pipeline with a stop word filter and an English language stemmer. Extra
+ * functions can be added before or after either of these functions or these
+ * default functions can be removed.
+ *
+ * When run the pipeline will call each function in turn, passing a token, the
+ * index of that token in the original list of all tokens and finally a list of
+ * all the original tokens.
+ *
+ * The output of functions in the pipeline will be passed to the next function
+ * in the pipeline. To exclude a token from entering the index the function
+ * should return undefined, the rest of the pipeline will not be called with
+ * this token.
+ *
+ * For serialisation of pipelines to work, all functions used in an instance of
+ * a pipeline should be registered with lunr.Pipeline. Registered functions can
+ * then be loaded. If trying to load a serialised pipeline that uses functions
+ * that are not registered an error will be thrown.
+ *
+ * If not planning on serialising the pipeline then registering pipeline functions
+ * is not necessary.
+ *
+ * @constructor
+ */
+ lunr.Pipeline = function () {
+ this._stack = []
+ }
+
+ lunr.Pipeline.registeredFunctions = Object.create(null)
+
+ /**
+ * A pipeline function maps lunr.Token to lunr.Token. A lunr.Token contains the token
+ * string as well as all known metadata. A pipeline function can mutate the token string
+ * or mutate (or add) metadata for a given token.
+ *
+ * A pipeline function can indicate that the passed token should be discarded by returning
+ * null, undefined or an empty string. This token will not be passed to any downstream pipeline
+ * functions and will not be added to the index.
+ *
+ * Multiple tokens can be returned by returning an array of tokens. Each token will be passed
+ * to any downstream pipeline functions and all will returned tokens will be added to the index.
+ *
+ * Any number of pipeline functions may be chained together using a lunr.Pipeline.
+ *
+ * @interface lunr.PipelineFunction
+ * @param {lunr.Token} token - A token from the document being processed.
+ * @param {number} i - The index of this token in the complete list of tokens for this document/field.
+ * @param {lunr.Token[]} tokens - All tokens for this document/field.
+ * @returns {(?lunr.Token|lunr.Token[])}
+ */
+
+ /**
+ * Register a function with the pipeline.
+ *
+ * Functions that are used in the pipeline should be registered if the pipeline
+ * needs to be serialised, or a serialised pipeline needs to be loaded.
+ *
+ * Registering a function does not add it to a pipeline, functions must still be
+ * added to instances of the pipeline for them to be used when running a pipeline.
+ *
+ * @param {lunr.PipelineFunction} fn - The function to check for.
+ * @param {String} label - The label to register this function with
+ */
+ lunr.Pipeline.registerFunction = function (fn, label) {
+ if (label in this.registeredFunctions) {
+ lunr.utils.warn('Overwriting existing registered function: ' + label)
+ }
+
+ fn.label = label
+ lunr.Pipeline.registeredFunctions[fn.label] = fn
+ }
+
+ /**
+ * Warns if the function is not registered as a Pipeline function.
+ *
+ * @param {lunr.PipelineFunction} fn - The function to check for.
+ * @private
+ */
+ lunr.Pipeline.warnIfFunctionNotRegistered = function (fn) {
+ var isRegistered = fn.label && (fn.label in this.registeredFunctions)
+
+ if (!isRegistered) {
+ lunr.utils.warn('Function is not registered with pipeline. This may cause problems when serialising the index.\n', fn)
+ }
+ }
+
+ /**
+ * Loads a previously serialised pipeline.
+ *
+ * All functions to be loaded must already be registered with lunr.Pipeline.
+ * If any function from the serialised data has not been registered then an
+ * error will be thrown.
+ *
+ * @param {Object} serialised - The serialised pipeline to load.
+ * @returns {lunr.Pipeline}
+ */
+ lunr.Pipeline.load = function (serialised) {
+ var pipeline = new lunr.Pipeline
+
+ serialised.forEach(function (fnName) {
+ var fn = lunr.Pipeline.registeredFunctions[fnName]
+
+ if (fn) {
+ pipeline.add(fn)
+ } else {
+ throw new Error('Cannot load unregistered function: ' + fnName)
+ }
+ })
+
+ return pipeline
+ }
+
+ /**
+ * Adds new functions to the end of the pipeline.
+ *
+ * Logs a warning if the function has not been registered.
+ *
+ * @param {lunr.PipelineFunction[]} functions - Any number of functions to add to the pipeline.
+ */
+ lunr.Pipeline.prototype.add = function () {
+ var fns = Array.prototype.slice.call(arguments)
+
+ fns.forEach(function (fn) {
+ lunr.Pipeline.warnIfFunctionNotRegistered(fn)
+ this._stack.push(fn)
+ }, this)
+ }
+
+ /**
+ * Adds a single function after a function that already exists in the
+ * pipeline.
+ *
+ * Logs a warning if the function has not been registered.
+ *
+ * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline.
+ * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline.
+ */
+ lunr.Pipeline.prototype.after = function (existingFn, newFn) {
+ lunr.Pipeline.warnIfFunctionNotRegistered(newFn)
+
+ var pos = this._stack.indexOf(existingFn)
+ if (pos == -1) {
+ throw new Error('Cannot find existingFn')
+ }
+
+ pos = pos + 1
+ this._stack.splice(pos, 0, newFn)
+ }
+
+ /**
+ * Adds a single function before a function that already exists in the
+ * pipeline.
+ *
+ * Logs a warning if the function has not been registered.
+ *
+ * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline.
+ * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline.
+ */
+ lunr.Pipeline.prototype.before = function (existingFn, newFn) {
+ lunr.Pipeline.warnIfFunctionNotRegistered(newFn)
+
+ var pos = this._stack.indexOf(existingFn)
+ if (pos == -1) {
+ throw new Error('Cannot find existingFn')
+ }
+
+ this._stack.splice(pos, 0, newFn)
+ }
+
+ /**
+ * Removes a function from the pipeline.
+ *
+ * @param {lunr.PipelineFunction} fn The function to remove from the pipeline.
+ */
+ lunr.Pipeline.prototype.remove = function (fn) {
+ var pos = this._stack.indexOf(fn)
+ if (pos == -1) {
+ return
+ }
+
+ this._stack.splice(pos, 1)
+ }
+
+ /**
+ * Runs the current list of functions that make up the pipeline against the
+ * passed tokens.
+ *
+ * @param {Array} tokens The tokens to run through the pipeline.
+ * @returns {Array}
+ */
+ lunr.Pipeline.prototype.run = function (tokens) {
+ var stackLength = this._stack.length
+
+ for (var i = 0; i < stackLength; i++) {
+ var fn = this._stack[i]
+ var memo = []
+
+ for (var j = 0; j < tokens.length; j++) {
+ var result = fn(tokens[j], j, tokens)
+
+ if (result === null || result === void 0 || result === '') continue
+
+ if (Array.isArray(result)) {
+ for (var k = 0; k < result.length; k++) {
+ memo.push(result[k])
+ }
+ } else {
+ memo.push(result)
+ }
+ }
+
+ tokens = memo
+ }
+
+ return tokens
+ }
+
+ /**
+ * Convenience method for passing a string through a pipeline and getting
+ * strings out. This method takes care of wrapping the passed string in a
+ * token and mapping the resulting tokens back to strings.
+ *
+ * @param {string} str - The string to pass through the pipeline.
+ * @param {?object} metadata - Optional metadata to associate with the token
+ * passed to the pipeline.
+ * @returns {string[]}
+ */
+ lunr.Pipeline.prototype.runString = function (str, metadata) {
+ var token = new lunr.Token(str, metadata)
+
+ return this.run([token]).map(function (t) {
+ return t.toString()
+ })
+ }
+
+ /**
+ * Resets the pipeline by removing any existing processors.
+ *
+ */
+ lunr.Pipeline.prototype.reset = function () {
+ this._stack = []
+ }
+
+ /**
+ * Returns a representation of the pipeline ready for serialisation.
+ *
+ * Logs a warning if the function has not been registered.
+ *
+ * @returns {Array}
+ */
+ lunr.Pipeline.prototype.toJSON = function () {
+ return this._stack.map(function (fn) {
+ lunr.Pipeline.warnIfFunctionNotRegistered(fn)
+
+ return fn.label
+ })
+ }
+ /*!
+ * lunr.Vector
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+ /**
+ * A vector is used to construct the vector space of documents and queries. These
+ * vectors support operations to determine the similarity between two documents or
+ * a document and a query.
+ *
+ * Normally no parameters are required for initializing a vector, but in the case of
+ * loading a previously dumped vector the raw elements can be provided to the constructor.
+ *
+ * For performance reasons vectors are implemented with a flat array, where an elements
+ * index is immediately followed by its value. E.g. [index, value, index, value]. This
+ * allows the underlying array to be as sparse as possible and still offer decent
+ * performance when being used for vector calculations.
+ *
+ * @constructor
+ * @param {Number[]} [elements] - The flat list of element index and element value pairs.
+ */
+ lunr.Vector = function (elements) {
+ this._magnitude = 0
+ this.elements = elements || []
+ }
+
+
+ /**
+ * Calculates the position within the vector to insert a given index.
+ *
+ * This is used internally by insert and upsert. If there are duplicate indexes then
+ * the position is returned as if the value for that index were to be updated, but it
+ * is the callers responsibility to check whether there is a duplicate at that index
+ *
+ * @param {Number} insertIdx - The index at which the element should be inserted.
+ * @returns {Number}
+ */
+ lunr.Vector.prototype.positionForIndex = function (index) {
+ // For an empty vector the tuple can be inserted at the beginning
+ if (this.elements.length == 0) {
+ return 0
+ }
+
+ var start = 0,
+ end = this.elements.length / 2,
+ sliceLength = end - start,
+ pivotPoint = Math.floor(sliceLength / 2),
+ pivotIndex = this.elements[pivotPoint * 2]
+
+ while (sliceLength > 1) {
+ if (pivotIndex < index) {
+ start = pivotPoint
+ }
+
+ if (pivotIndex > index) {
+ end = pivotPoint
+ }
+
+ if (pivotIndex == index) {
+ break
+ }
+
+ sliceLength = end - start
+ pivotPoint = start + Math.floor(sliceLength / 2)
+ pivotIndex = this.elements[pivotPoint * 2]
+ }
+
+ if (pivotIndex == index) {
+ return pivotPoint * 2
+ }
+
+ if (pivotIndex > index) {
+ return pivotPoint * 2
+ }
+
+ if (pivotIndex < index) {
+ return (pivotPoint + 1) * 2
+ }
+ }
+
+ /**
+ * Inserts an element at an index within the vector.
+ *
+ * Does not allow duplicates, will throw an error if there is already an entry
+ * for this index.
+ *
+ * @param {Number} insertIdx - The index at which the element should be inserted.
+ * @param {Number} val - The value to be inserted into the vector.
+ */
+ lunr.Vector.prototype.insert = function (insertIdx, val) {
+ this.upsert(insertIdx, val, function () {
+ throw "duplicate index"
+ })
+ }
+
+ /**
+ * Inserts or updates an existing index within the vector.
+ *
+ * @param {Number} insertIdx - The index at which the element should be inserted.
+ * @param {Number} val - The value to be inserted into the vector.
+ * @param {function} fn - A function that is called for updates, the existing value and the
+ * requested value are passed as arguments
+ */
+ lunr.Vector.prototype.upsert = function (insertIdx, val, fn) {
+ this._magnitude = 0
+ var position = this.positionForIndex(insertIdx)
+
+ if (this.elements[position] == insertIdx) {
+ this.elements[position + 1] = fn(this.elements[position + 1], val)
+ } else {
+ this.elements.splice(position, 0, insertIdx, val)
+ }
+ }
+
+ /**
+ * Calculates the magnitude of this vector.
+ *
+ * @returns {Number}
+ */
+ lunr.Vector.prototype.magnitude = function () {
+ if (this._magnitude) return this._magnitude
+
+ var sumOfSquares = 0,
+ elementsLength = this.elements.length
+
+ for (var i = 1; i < elementsLength; i += 2) {
+ var val = this.elements[i]
+ sumOfSquares += val * val
+ }
+
+ return this._magnitude = Math.sqrt(sumOfSquares)
+ }
+
+ /**
+ * Calculates the dot product of this vector and another vector.
+ *
+ * @param {lunr.Vector} otherVector - The vector to compute the dot product with.
+ * @returns {Number}
+ */
+ lunr.Vector.prototype.dot = function (otherVector) {
+ var dotProduct = 0,
+ a = this.elements, b = otherVector.elements,
+ aLen = a.length, bLen = b.length,
+ aVal = 0, bVal = 0,
+ i = 0, j = 0
+
+ while (i < aLen && j < bLen) {
+ aVal = a[i], bVal = b[j]
+ if (aVal < bVal) {
+ i += 2
+ } else if (aVal > bVal) {
+ j += 2
+ } else if (aVal == bVal) {
+ dotProduct += a[i + 1] * b[j + 1]
+ i += 2
+ j += 2
+ }
+ }
+
+ return dotProduct
+ }
+
+ /**
+ * Calculates the similarity between this vector and another vector.
+ *
+ * @param {lunr.Vector} otherVector - The other vector to calculate the
+ * similarity with.
+ * @returns {Number}
+ */
+ lunr.Vector.prototype.similarity = function (otherVector) {
+ return this.dot(otherVector) / this.magnitude() || 0
+ }
+
+ /**
+ * Converts the vector to an array of the elements within the vector.
+ *
+ * @returns {Number[]}
+ */
+ lunr.Vector.prototype.toArray = function () {
+ var output = new Array(this.elements.length / 2)
+
+ for (var i = 1, j = 0; i < this.elements.length; i += 2, j++) {
+ output[j] = this.elements[i]
+ }
+
+ return output
+ }
+
+ /**
+ * A JSON serializable representation of the vector.
+ *
+ * @returns {Number[]}
+ */
+ lunr.Vector.prototype.toJSON = function () {
+ return this.elements
+ }
+ /* eslint-disable */
+ /*!
+ * lunr.stemmer
+ * Copyright (C) 2020 Oliver Nightingale
+ * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt
+ */
+
+ /**
+ * lunr.stemmer is an english language stemmer, this is a JavaScript
+ * implementation of the PorterStemmer taken from http://tartarus.org/~martin
+ *
+ * @static
+ * @implements {lunr.PipelineFunction}
+ * @param {lunr.Token} token - The string to stem
+ * @returns {lunr.Token}
+ * @see {@link lunr.Pipeline}
+ * @function
+ */
+ lunr.stemmer = (function () {
+ var step2list = {
+ "ational": "ate",
+ "tional": "tion",
+ "enci": "ence",
+ "anci": "ance",
+ "izer": "ize",
+ "bli": "ble",
+ "alli": "al",
+ "entli": "ent",
+ "eli": "e",
+ "ousli": "ous",
+ "ization": "ize",
+ "ation": "ate",
+ "ator": "ate",
+ "alism": "al",
+ "iveness": "ive",
+ "fulness": "ful",
+ "ousness": "ous",
+ "aliti": "al",
+ "iviti": "ive",
+ "biliti": "ble",
+ "logi": "log"
+ },
+
+ step3list = {
+ "icate": "ic",
+ "ative": "",
+ "alize": "al",
+ "iciti": "ic",
+ "ical": "ic",
+ "ful": "",
+ "ness": ""
+ },
+
+ c = "[^aeiou]", // consonant
+ v = "[aeiouy]", // vowel
+ C = c + "[^aeiouy]*", // consonant sequence
+ V = v + "[aeiou]*", // vowel sequence
+
+ mgr0 = "^(" + C + ")?" + V + C, // [C]VC... is m>0
+ meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$", // [C]VC[V] is m=1
+ mgr1 = "^(" + C + ")?" + V + C + V + C, // [C]VCVC... is m>1
+ s_v = "^(" + C + ")?" + v; // vowel in stem
+
+ var re_mgr0 = new RegExp(mgr0);
+ var re_mgr1 = new RegExp(mgr1);
+ var re_meq1 = new RegExp(meq1);
+ var re_s_v = new RegExp(s_v);
+
+ var re_1a = /^(.+?)(ss|i)es$/;
+ var re2_1a = /^(.+?)([^s])s$/;
+ var re_1b = /^(.+?)eed$/;
+ var re2_1b = /^(.+?)(ed|ing)$/;
+ var re_1b_2 = /.$/;
+ var re2_1b_2 = /(at|bl|iz)$/;
+ var re3_1b_2 = new RegExp("([^aeiouylsz])\\1$");
+ var re4_1b_2 = new RegExp("^" + C + v + "[^aeiouwxy]$");
+
+ var re_1c = /^(.+?[^aeiou])y$/;
+ var re_2 = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
+
+ var re_3 = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
+
+ var re_4 = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
+ var re2_4 = /^(.+?)(s|t)(ion)$/;
+
+ var re_5 = /^(.+?)e$/;
+ var re_5_1 = /ll$/;
+ var re3_5 = new RegExp("^" + C + v + "[^aeiouwxy]$");
+
+ var porterStemmer = function porterStemmer(w) {
+ var stem,
+ suffix,
+ firstch,
+ re,
+ re2,
+ re3,
+ re4;
+
+ if (w.length < 3) { return w; }
+
+ firstch = w.substr(0, 1);
+ if (firstch == "y") {
+ w = firstch.toUpperCase() + w.substr(1);
+ }
+
+ // Step 1a
+ re = re_1a
+ re2 = re2_1a;
+
+ if (re.test(w)) { w = w.replace(re, "$1$2"); }
+ else if (re2.test(w)) { w = w.replace(re2, "$1$2"); }
+
+ // Step 1b
+ re = re_1b;
+ re2 = re2_1b;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ re = re_mgr0;
+ if (re.test(fp[1])) {
+ re = re_1b_2;
+ w = w.replace(re, "");
+ }
+ } else if (re2.test(w)) {
+ var fp = re2.exec(w);
+ stem = fp[1];
+ re2 = re_s_v;
+ if (re2.test(stem)) {
+ w = stem;
+ re2 = re2_1b_2;
+ re3 = re3_1b_2;
+ re4 = re4_1b_2;
+ if (re2.test(w)) { w = w + "e"; }
+ else if (re3.test(w)) { re = re_1b_2; w = w.replace(re, ""); }
+ else if (re4.test(w)) { w = w + "e"; }
+ }
+ }
+
+ // Step 1c - replace suffix y or Y by i if preceded by a non-vowel which is not the first letter of the word (so cry -> cri, by -> by, say -> say)
+ re = re_1c;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ w = stem + "i";
+ }
+
+ // Step 2
+ re = re_2;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ suffix = fp[2];
+ re = re_mgr0;
+ if (re.test(stem)) {
+ w = stem + step2list[suffix];
+ }
+ }
+
+ // Step 3
+ re = re_3;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ suffix = fp[2];
+ re = re_mgr0;
+ if (re.test(stem)) {
+ w = stem + step3list[suffix];
+ }
+ }
+
+ // Step 4
+ re = re_4;
+ re2 = re2_4;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = re_mgr1;
+ if (re.test(stem)) {
+ w = stem;
+ }
+ } else if (re2.test(w)) {
+ var fp = re2.exec(w);
+ stem = fp[1] + fp[2];
+ re2 = re_mgr1;
+ if (re2.test(stem)) {
+ w = stem;
+ }
+ }
+
+ // Step 5
+ re = re_5;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = re_mgr1;
+ re2 = re_meq1;
+ re3 = re3_5;
+ if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) {
+ w = stem;
+ }
+ }
+
+ re = re_5_1;
+ re2 = re_mgr1;
+ if (re.test(w) && re2.test(w)) {
+ re = re_1b_2;
+ w = w.replace(re, "");
+ }
+
+ // and turn initial Y back to y
+
+ if (firstch == "y") {
+ w = firstch.toLowerCase() + w.substr(1);
+ }
+
+ return w;
+ };
+
+ return function (token) {
+ return token.update(porterStemmer);
+ }
+ })();
+
+ lunr.Pipeline.registerFunction(lunr.stemmer, 'stemmer')
+ /*!
+ * lunr.stopWordFilter
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+ /**
+ * lunr.generateStopWordFilter builds a stopWordFilter function from the provided
+ * list of stop words.
+ *
+ * The built in lunr.stopWordFilter is built using this generator and can be used
+ * to generate custom stopWordFilters for applications or non English languages.
+ *
+ * @function
+ * @param {Array} token The token to pass through the filter
+ * @returns {lunr.PipelineFunction}
+ * @see lunr.Pipeline
+ * @see lunr.stopWordFilter
+ */
+ lunr.generateStopWordFilter = function (stopWords) {
+ var words = stopWords.reduce(function (memo, stopWord) {
+ memo[stopWord] = stopWord
+ return memo
+ }, {})
+
+ return function (token) {
+ if (token && words[token.toString()] !== token.toString()) return token
+ }
+ }
+
+ /**
+ * lunr.stopWordFilter is an English language stop word list filter, any words
+ * contained in the list will not be passed through the filter.
+ *
+ * This is intended to be used in the Pipeline. If the token does not pass the
+ * filter then undefined will be returned.
+ *
+ * @function
+ * @implements {lunr.PipelineFunction}
+ * @params {lunr.Token} token - A token to check for being a stop word.
+ * @returns {lunr.Token}
+ * @see {@link lunr.Pipeline}
+ */
+ lunr.stopWordFilter = lunr.generateStopWordFilter([
+ 'a',
+ 'able',
+ 'about',
+ 'across',
+ 'after',
+ 'all',
+ 'almost',
+ 'also',
+ 'am',
+ 'among',
+ 'an',
+ 'and',
+ 'any',
+ 'are',
+ 'as',
+ 'at',
+ 'be',
+ 'because',
+ 'been',
+ 'but',
+ 'by',
+ 'can',
+ 'cannot',
+ 'could',
+ 'dear',
+ 'did',
+ 'do',
+ 'does',
+ 'either',
+ 'else',
+ 'ever',
+ 'every',
+ 'for',
+ 'from',
+ 'get',
+ 'got',
+ 'had',
+ 'has',
+ 'have',
+ 'he',
+ 'her',
+ 'hers',
+ 'him',
+ 'his',
+ 'how',
+ 'however',
+ 'i',
+ 'if',
+ 'in',
+ 'into',
+ 'is',
+ 'it',
+ 'its',
+ 'just',
+ 'least',
+ 'let',
+ 'like',
+ 'likely',
+ 'may',
+ 'me',
+ 'might',
+ 'most',
+ 'must',
+ 'my',
+ 'neither',
+ 'no',
+ 'nor',
+ 'not',
+ 'of',
+ 'off',
+ 'often',
+ 'on',
+ 'only',
+ 'or',
+ 'other',
+ 'our',
+ 'own',
+ 'rather',
+ 'said',
+ 'say',
+ 'says',
+ 'she',
+ 'should',
+ 'since',
+ 'so',
+ 'some',
+ 'than',
+ 'that',
+ 'the',
+ 'their',
+ 'them',
+ 'then',
+ 'there',
+ 'these',
+ 'they',
+ 'this',
+ 'tis',
+ 'to',
+ 'too',
+ 'twas',
+ 'us',
+ 'wants',
+ 'was',
+ 'we',
+ 'were',
+ 'what',
+ 'when',
+ 'where',
+ 'which',
+ 'while',
+ 'who',
+ 'whom',
+ 'why',
+ 'will',
+ 'with',
+ 'would',
+ 'yet',
+ 'you',
+ 'your'
+ ])
+
+ lunr.Pipeline.registerFunction(lunr.stopWordFilter, 'stopWordFilter')
+ /*!
+ * lunr.trimmer
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+ /**
+ * lunr.trimmer is a pipeline function for trimming non word
+ * characters from the beginning and end of tokens before they
+ * enter the index.
+ *
+ * This implementation may not work correctly for non latin
+ * characters and should either be removed or adapted for use
+ * with languages with non-latin characters.
+ *
+ * @static
+ * @implements {lunr.PipelineFunction}
+ * @param {lunr.Token} token The token to pass through the filter
+ * @returns {lunr.Token}
+ * @see lunr.Pipeline
+ */
+ lunr.trimmer = function (token) {
+ return token.update(function (s) {
+ return s.replace(/^\W+/, '').replace(/\W+$/, '')
+ })
+ }
+
+ lunr.Pipeline.registerFunction(lunr.trimmer, 'trimmer')
+ /*!
+ * lunr.TokenSet
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+ /**
+ * A token set is used to store the unique list of all tokens
+ * within an index. Token sets are also used to represent an
+ * incoming query to the index, this query token set and index
+ * token set are then intersected to find which tokens to look
+ * up in the inverted index.
+ *
+ * A token set can hold multiple tokens, as in the case of the
+ * index token set, or it can hold a single token as in the
+ * case of a simple query token set.
+ *
+ * Additionally token sets are used to perform wildcard matching.
+ * Leading, contained and trailing wildcards are supported, and
+ * from this edit distance matching can also be provided.
+ *
+ * Token sets are implemented as a minimal finite state automata,
+ * where both common prefixes and suffixes are shared between tokens.
+ * This helps to reduce the space used for storing the token set.
+ *
+ * @constructor
+ */
+ lunr.TokenSet = function () {
+ this.final = false
+ this.edges = {}
+ this.id = lunr.TokenSet._nextId
+ lunr.TokenSet._nextId += 1
+ }
+
+ /**
+ * Keeps track of the next, auto increment, identifier to assign
+ * to a new tokenSet.
+ *
+ * TokenSets require a unique identifier to be correctly minimised.
+ *
+ * @private
+ */
+ lunr.TokenSet._nextId = 1
+
+ /**
+ * Creates a TokenSet instance from the given sorted array of words.
+ *
+ * @param {String[]} arr - A sorted array of strings to create the set from.
+ * @returns {lunr.TokenSet}
+ * @throws Will throw an error if the input array is not sorted.
+ */
+ lunr.TokenSet.fromArray = function (arr) {
+ var builder = new lunr.TokenSet.Builder
+
+ for (var i = 0, len = arr.length; i < len; i++) {
+ builder.insert(arr[i])
+ }
+
+ builder.finish()
+ return builder.root
+ }
+
+ /**
+ * Creates a token set from a query clause.
+ *
+ * @private
+ * @param {Object} clause - A single clause from lunr.Query.
+ * @param {string} clause.term - The query clause term.
+ * @param {number} [clause.editDistance] - The optional edit distance for the term.
+ * @returns {lunr.TokenSet}
+ */
+ lunr.TokenSet.fromClause = function (clause) {
+ if ('editDistance' in clause) {
+ return lunr.TokenSet.fromFuzzyString(clause.term, clause.editDistance)
+ } else {
+ return lunr.TokenSet.fromString(clause.term)
+ }
+ }
+
+ /**
+ * Creates a token set representing a single string with a specified
+ * edit distance.
+ *
+ * Insertions, deletions, substitutions and transpositions are each
+ * treated as an edit distance of 1.
+ *
+ * Increasing the allowed edit distance will have a dramatic impact
+ * on the performance of both creating and intersecting these TokenSets.
+ * It is advised to keep the edit distance less than 3.
+ *
+ * @param {string} str - The string to create the token set from.
+ * @param {number} editDistance - The allowed edit distance to match.
+ * @returns {lunr.Vector}
+ */
+ lunr.TokenSet.fromFuzzyString = function (str, editDistance) {
+ var root = new lunr.TokenSet
+
+ var stack = [{
+ node: root,
+ editsRemaining: editDistance,
+ str: str
+ }]
+
+ while (stack.length) {
+ var frame = stack.pop()
+
+ // no edit
+ if (frame.str.length > 0) {
+ var char = frame.str.charAt(0),
+ noEditNode
+
+ if (char in frame.node.edges) {
+ noEditNode = frame.node.edges[char]
+ } else {
+ noEditNode = new lunr.TokenSet
+ frame.node.edges[char] = noEditNode
+ }
+
+ if (frame.str.length == 1) {
+ noEditNode.final = true
+ }
+
+ stack.push({
+ node: noEditNode,
+ editsRemaining: frame.editsRemaining,
+ str: frame.str.slice(1)
+ })
+ }
+
+ if (frame.editsRemaining == 0) {
+ continue
+ }
+
+ // insertion
+ if ("*" in frame.node.edges) {
+ var insertionNode = frame.node.edges["*"]
+ } else {
+ var insertionNode = new lunr.TokenSet
+ frame.node.edges["*"] = insertionNode
+ }
+
+ if (frame.str.length == 0) {
+ insertionNode.final = true
+ }
+
+ stack.push({
+ node: insertionNode,
+ editsRemaining: frame.editsRemaining - 1,
+ str: frame.str
+ })
+
+ // deletion
+ // can only do a deletion if we have enough edits remaining
+ // and if there are characters left to delete in the string
+ if (frame.str.length > 1) {
+ stack.push({
+ node: frame.node,
+ editsRemaining: frame.editsRemaining - 1,
+ str: frame.str.slice(1)
+ })
+ }
+
+ // deletion
+ // just removing the last character from the str
+ if (frame.str.length == 1) {
+ frame.node.final = true
+ }
+
+ // substitution
+ // can only do a substitution if we have enough edits remaining
+ // and if there are characters left to substitute
+ if (frame.str.length >= 1) {
+ if ("*" in frame.node.edges) {
+ var substitutionNode = frame.node.edges["*"]
+ } else {
+ var substitutionNode = new lunr.TokenSet
+ frame.node.edges["*"] = substitutionNode
+ }
+
+ if (frame.str.length == 1) {
+ substitutionNode.final = true
+ }
+
+ stack.push({
+ node: substitutionNode,
+ editsRemaining: frame.editsRemaining - 1,
+ str: frame.str.slice(1)
+ })
+ }
+
+ // transposition
+ // can only do a transposition if there are edits remaining
+ // and there are enough characters to transpose
+ if (frame.str.length > 1) {
+ var charA = frame.str.charAt(0),
+ charB = frame.str.charAt(1),
+ transposeNode
+
+ if (charB in frame.node.edges) {
+ transposeNode = frame.node.edges[charB]
+ } else {
+ transposeNode = new lunr.TokenSet
+ frame.node.edges[charB] = transposeNode
+ }
+
+ if (frame.str.length == 1) {
+ transposeNode.final = true
+ }
+
+ stack.push({
+ node: transposeNode,
+ editsRemaining: frame.editsRemaining - 1,
+ str: charA + frame.str.slice(2)
+ })
+ }
+ }
+
+ return root
+ }
+
+ /**
+ * Creates a TokenSet from a string.
+ *
+ * The string may contain one or more wildcard characters (*)
+ * that will allow wildcard matching when intersecting with
+ * another TokenSet.
+ *
+ * @param {string} str - The string to create a TokenSet from.
+ * @returns {lunr.TokenSet}
+ */
+ lunr.TokenSet.fromString = function (str) {
+ var node = new lunr.TokenSet,
+ root = node
+
+ /*
+ * Iterates through all characters within the passed string
+ * appending a node for each character.
+ *
+ * When a wildcard character is found then a self
+ * referencing edge is introduced to continually match
+ * any number of any characters.
+ */
+ for (var i = 0, len = str.length; i < len; i++) {
+ var char = str[i],
+ final = (i == len - 1)
+
+ if (char == "*") {
+ node.edges[char] = node
+ node.final = final
+
+ } else {
+ var next = new lunr.TokenSet
+ next.final = final
+
+ node.edges[char] = next
+ node = next
+ }
+ }
+
+ return root
+ }
+
+ /**
+ * Converts this TokenSet into an array of strings
+ * contained within the TokenSet.
+ *
+ * This is not intended to be used on a TokenSet that
+ * contains wildcards, in these cases the results are
+ * undefined and are likely to cause an infinite loop.
+ *
+ * @returns {string[]}
+ */
+ lunr.TokenSet.prototype.toArray = function () {
+ var words = []
+
+ var stack = [{
+ prefix: "",
+ node: this
+ }]
+
+ while (stack.length) {
+ var frame = stack.pop(),
+ edges = Object.keys(frame.node.edges),
+ len = edges.length
+
+ if (frame.node.final) {
+ /* In Safari, at this point the prefix is sometimes corrupted, see:
+ * https://github.com/olivernn/lunr.js/issues/279 Calling any
+ * String.prototype method forces Safari to "cast" this string to what
+ * it's supposed to be, fixing the bug. */
+ frame.prefix.charAt(0)
+ words.push(frame.prefix)
+ }
+
+ for (var i = 0; i < len; i++) {
+ var edge = edges[i]
+
+ stack.push({
+ prefix: frame.prefix.concat(edge),
+ node: frame.node.edges[edge]
+ })
+ }
+ }
+
+ return words
+ }
+
+ /**
+ * Generates a string representation of a TokenSet.
+ *
+ * This is intended to allow TokenSets to be used as keys
+ * in objects, largely to aid the construction and minimisation
+ * of a TokenSet. As such it is not designed to be a human
+ * friendly representation of the TokenSet.
+ *
+ * @returns {string}
+ */
+ lunr.TokenSet.prototype.toString = function () {
+ // NOTE: Using Object.keys here as this.edges is very likely
+ // to enter 'hash-mode' with many keys being added
+ //
+ // avoiding a for-in loop here as it leads to the function
+ // being de-optimised (at least in V8). From some simple
+ // benchmarks the performance is comparable, but allowing
+ // V8 to optimize may mean easy performance wins in the future.
+
+ if (this._str) {
+ return this._str
+ }
+
+ var str = this.final ? '1' : '0',
+ labels = Object.keys(this.edges).sort(),
+ len = labels.length
+
+ for (var i = 0; i < len; i++) {
+ var label = labels[i],
+ node = this.edges[label]
+
+ str = str + label + node.id
+ }
+
+ return str
+ }
+
+ /**
+ * Returns a new TokenSet that is the intersection of
+ * this TokenSet and the passed TokenSet.
+ *
+ * This intersection will take into account any wildcards
+ * contained within the TokenSet.
+ *
+ * @param {lunr.TokenSet} b - An other TokenSet to intersect with.
+ * @returns {lunr.TokenSet}
+ */
+ lunr.TokenSet.prototype.intersect = function (b) {
+ var output = new lunr.TokenSet,
+ frame = undefined
+
+ var stack = [{
+ qNode: b,
+ output: output,
+ node: this
+ }]
+
+ while (stack.length) {
+ frame = stack.pop()
+
+ // NOTE: As with the #toString method, we are using
+ // Object.keys and a for loop instead of a for-in loop
+ // as both of these objects enter 'hash' mode, causing
+ // the function to be de-optimised in V8
+ var qEdges = Object.keys(frame.qNode.edges),
+ qLen = qEdges.length,
+ nEdges = Object.keys(frame.node.edges),
+ nLen = nEdges.length
+
+ for (var q = 0; q < qLen; q++) {
+ var qEdge = qEdges[q]
+
+ for (var n = 0; n < nLen; n++) {
+ var nEdge = nEdges[n]
+
+ if (nEdge == qEdge || qEdge == '*') {
+ var node = frame.node.edges[nEdge],
+ qNode = frame.qNode.edges[qEdge],
+ final = node.final && qNode.final,
+ next = undefined
+
+ if (nEdge in frame.output.edges) {
+ // an edge already exists for this character
+ // no need to create a new node, just set the finality
+ // bit unless this node is already final
+ next = frame.output.edges[nEdge]
+ next.final = next.final || final
+
+ } else {
+ // no edge exists yet, must create one
+ // set the finality bit and insert it
+ // into the output
+ next = new lunr.TokenSet
+ next.final = final
+ frame.output.edges[nEdge] = next
+ }
+
+ stack.push({
+ qNode: qNode,
+ output: next,
+ node: node
+ })
+ }
+ }
+ }
+ }
+
+ return output
+ }
+ lunr.TokenSet.Builder = function () {
+ this.previousWord = ""
+ this.root = new lunr.TokenSet
+ this.uncheckedNodes = []
+ this.minimizedNodes = {}
+ }
+
+ lunr.TokenSet.Builder.prototype.insert = function (word) {
+ var node,
+ commonPrefix = 0
+
+ if (word < this.previousWord) {
+ throw new Error("Out of order word insertion")
+ }
+
+ for (var i = 0; i < word.length && i < this.previousWord.length; i++) {
+ if (word[i] != this.previousWord[i]) break
+ commonPrefix++
+ }
+
+ this.minimize(commonPrefix)
+
+ if (this.uncheckedNodes.length == 0) {
+ node = this.root
+ } else {
+ node = this.uncheckedNodes[this.uncheckedNodes.length - 1].child
+ }
+
+ for (var i = commonPrefix; i < word.length; i++) {
+ var nextNode = new lunr.TokenSet,
+ char = word[i]
+
+ node.edges[char] = nextNode
+
+ this.uncheckedNodes.push({
+ parent: node,
+ char: char,
+ child: nextNode
+ })
+
+ node = nextNode
+ }
+
+ node.final = true
+ this.previousWord = word
+ }
+
+ lunr.TokenSet.Builder.prototype.finish = function () {
+ this.minimize(0)
+ }
+
+ lunr.TokenSet.Builder.prototype.minimize = function (downTo) {
+ for (var i = this.uncheckedNodes.length - 1; i >= downTo; i--) {
+ var node = this.uncheckedNodes[i],
+ childKey = node.child.toString()
+
+ if (childKey in this.minimizedNodes) {
+ node.parent.edges[node.char] = this.minimizedNodes[childKey]
+ } else {
+ // Cache the key for this node since
+ // we know it can't change anymore
+ node.child._str = childKey
+
+ this.minimizedNodes[childKey] = node.child
+ }
+
+ this.uncheckedNodes.pop()
+ }
+ }
+ /*!
+ * lunr.Index
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+ /**
+ * An index contains the built index of all documents and provides a query interface
+ * to the index.
+ *
+ * Usually instances of lunr.Index will not be created using this constructor, instead
+ * lunr.Builder should be used to construct new indexes, or lunr.Index.load should be
+ * used to load previously built and serialized indexes.
+ *
+ * @constructor
+ * @param {Object} attrs - The attributes of the built search index.
+ * @param {Object} attrs.invertedIndex - An index of term/field to document reference.
+ * @param {Object} attrs.fieldVectors - Field vectors
+ * @param {lunr.TokenSet} attrs.tokenSet - An set of all corpus tokens.
+ * @param {string[]} attrs.fields - The names of indexed document fields.
+ * @param {lunr.Pipeline} attrs.pipeline - The pipeline to use for search terms.
+ */
+ lunr.Index = function (attrs) {
+ this.invertedIndex = attrs.invertedIndex
+ this.fieldVectors = attrs.fieldVectors
+ this.tokenSet = attrs.tokenSet
+ this.fields = attrs.fields
+ this.pipeline = attrs.pipeline
+ }
+
+ /**
+ * A result contains details of a document matching a search query.
+ * @typedef {Object} lunr.Index~Result
+ * @property {string} ref - The reference of the document this result represents.
+ * @property {number} score - A number between 0 and 1 representing how similar this document is to the query.
+ * @property {lunr.MatchData} matchData - Contains metadata about this match including which term(s) caused the match.
+ */
+
+ /**
+ * Although lunr provides the ability to create queries using lunr.Query, it also provides a simple
+ * query language which itself is parsed into an instance of lunr.Query.
+ *
+ * For programmatically building queries it is advised to directly use lunr.Query, the query language
+ * is best used for human entered text rather than program generated text.
+ *
+ * At its simplest queries can just be a single term, e.g. `hello`, multiple terms are also supported
+ * and will be combined with OR, e.g `hello world` will match documents that contain either 'hello'
+ * or 'world', though those that contain both will rank higher in the results.
+ *
+ * Wildcards can be included in terms to match one or more unspecified characters, these wildcards can
+ * be inserted anywhere within the term, and more than one wildcard can exist in a single term. Adding
+ * wildcards will increase the number of documents that will be found but can also have a negative
+ * impact on query performance, especially with wildcards at the beginning of a term.
+ *
+ * Terms can be restricted to specific fields, e.g. `title:hello`, only documents with the term
+ * hello in the title field will match this query. Using a field not present in the index will lead
+ * to an error being thrown.
+ *
+ * Modifiers can also be added to terms, lunr supports edit distance and boost modifiers on terms. A term
+ * boost will make documents matching that term score higher, e.g. `foo^5`. Edit distance is also supported
+ * to provide fuzzy matching, e.g. 'hello~2' will match documents with hello with an edit distance of 2.
+ * Avoid large values for edit distance to improve query performance.
+ *
+ * Each term also supports a presence modifier. By default a term's presence in document is optional, however
+ * this can be changed to either required or prohibited. For a term's presence to be required in a document the
+ * term should be prefixed with a '+', e.g. `+foo bar` is a search for documents that must contain 'foo' and
+ * optionally contain 'bar'. Conversely a leading '-' sets the terms presence to prohibited, i.e. it must not
+ * appear in a document, e.g. `-foo bar` is a search for documents that do not contain 'foo' but may contain 'bar'.
+ *
+ * To escape special characters the backslash character '\' can be used, this allows searches to include
+ * characters that would normally be considered modifiers, e.g. `foo\~2` will search for a term "foo~2" instead
+ * of attempting to apply a boost of 2 to the search term "foo".
+ *
+ * @typedef {string} lunr.Index~QueryString
+ * @example <caption>Simple single term query</caption>
+ * hello
+ * @example <caption>Multiple term query</caption>
+ * hello world
+ * @example <caption>term scoped to a field</caption>
+ * title:hello
+ * @example <caption>term with a boost of 10</caption>
+ * hello^10
+ * @example <caption>term with an edit distance of 2</caption>
+ * hello~2
+ * @example <caption>terms with presence modifiers</caption>
+ * -foo +bar baz
+ */
+
+ /**
+ * Performs a search against the index using lunr query syntax.
+ *
+ * Results will be returned sorted by their score, the most relevant results
+ * will be returned first. For details on how the score is calculated, please see
+ * the {@link https://lunrjs.com/guides/searching.html#scoring|guide}.
+ *
+ * For more programmatic querying use lunr.Index#query.
+ *
+ * @param {lunr.Index~QueryString} queryString - A string containing a lunr query.
+ * @throws {lunr.QueryParseError} If the passed query string cannot be parsed.
+ * @returns {lunr.Index~Result[]}
+ */
+ lunr.Index.prototype.search = function (queryString) {
+ return this.query(function (query) {
+ var parser = new lunr.QueryParser(queryString, query)
+ parser.parse()
+ })
+ }
+
+ /**
+ * A query builder callback provides a query object to be used to express
+ * the query to perform on the index.
+ *
+ * @callback lunr.Index~queryBuilder
+ * @param {lunr.Query} query - The query object to build up.
+ * @this lunr.Query
+ */
+
+ /**
+ * Performs a query against the index using the yielded lunr.Query object.
+ *
+ * If performing programmatic queries against the index, this method is preferred
+ * over lunr.Index#search so as to avoid the additional query parsing overhead.
+ *
+ * A query object is yielded to the supplied function which should be used to
+ * express the query to be run against the index.
+ *
+ * Note that although this function takes a callback parameter it is _not_ an
+ * asynchronous operation, the callback is just yielded a query object to be
+ * customized.
+ *
+ * @param {lunr.Index~queryBuilder} fn - A function that is used to build the query.
+ * @returns {lunr.Index~Result[]}
+ */
+ lunr.Index.prototype.query = function (fn) {
+ // for each query clause
+ // * process terms
+ // * expand terms from token set
+ // * find matching documents and metadata
+ // * get document vectors
+ // * score documents
+
+ var query = new lunr.Query(this.fields),
+ matchingFields = Object.create(null),
+ queryVectors = Object.create(null),
+ termFieldCache = Object.create(null),
+ requiredMatches = Object.create(null),
+ prohibitedMatches = Object.create(null)
+
+ /*
+ * To support field level boosts a query vector is created per
+ * field. An empty vector is eagerly created to support negated
+ * queries.
+ */
+ for (var i = 0; i < this.fields.length; i++) {
+ queryVectors[this.fields[i]] = new lunr.Vector
+ }
+
+ fn.call(query, query)
+
+ for (var i = 0; i < query.clauses.length; i++) {
+ /*
+ * Unless the pipeline has been disabled for this term, which is
+ * the case for terms with wildcards, we need to pass the clause
+ * term through the search pipeline. A pipeline returns an array
+ * of processed terms. Pipeline functions may expand the passed
+ * term, which means we may end up performing multiple index lookups
+ * for a single query term.
+ */
+ var clause = query.clauses[i],
+ terms = null,
+ clauseMatches = lunr.Set.empty
+
+ if (clause.usePipeline) {
+ terms = this.pipeline.runString(clause.term, {
+ fields: clause.fields
+ })
+ } else {
+ terms = [clause.term]
+ }
+
+ for (var m = 0; m < terms.length; m++) {
+ var term = terms[m]
+
+ /*
+ * Each term returned from the pipeline needs to use the same query
+ * clause object, e.g. the same boost and or edit distance. The
+ * simplest way to do this is to re-use the clause object but mutate
+ * its term property.
+ */
+ clause.term = term
+
+ /*
+ * From the term in the clause we create a token set which will then
+ * be used to intersect the indexes token set to get a list of terms
+ * to lookup in the inverted index
+ */
+ var termTokenSet = lunr.TokenSet.fromClause(clause),
+ expandedTerms = this.tokenSet.intersect(termTokenSet).toArray()
+
+ /*
+ * If a term marked as required does not exist in the tokenSet it is
+ * impossible for the search to return any matches. We set all the field
+ * scoped required matches set to empty and stop examining any further
+ * clauses.
+ */
+ if (expandedTerms.length === 0 && clause.presence === lunr.Query.presence.REQUIRED) {
+ for (var k = 0; k < clause.fields.length; k++) {
+ var field = clause.fields[k]
+ requiredMatches[field] = lunr.Set.empty
+ }
+
+ break
+ }
+
+ for (var j = 0; j < expandedTerms.length; j++) {
+ /*
+ * For each term get the posting and termIndex, this is required for
+ * building the query vector.
+ */
+ var expandedTerm = expandedTerms[j],
+ posting = this.invertedIndex[expandedTerm],
+ termIndex = posting._index
+
+ for (var k = 0; k < clause.fields.length; k++) {
+ /*
+ * For each field that this query term is scoped by (by default
+ * all fields are in scope) we need to get all the document refs
+ * that have this term in that field.
+ *
+ * The posting is the entry in the invertedIndex for the matching
+ * term from above.
+ */
+ var field = clause.fields[k],
+ fieldPosting = posting[field],
+ matchingDocumentRefs = Object.keys(fieldPosting),
+ termField = expandedTerm + "/" + field,
+ matchingDocumentsSet = new lunr.Set(matchingDocumentRefs)
+
+ /*
+ * if the presence of this term is required ensure that the matching
+ * documents are added to the set of required matches for this clause.
+ *
+ */
+ if (clause.presence == lunr.Query.presence.REQUIRED) {
+ clauseMatches = clauseMatches.union(matchingDocumentsSet)
+
+ if (requiredMatches[field] === undefined) {
+ requiredMatches[field] = lunr.Set.complete
+ }
+ }
+
+ /*
+ * if the presence of this term is prohibited ensure that the matching
+ * documents are added to the set of prohibited matches for this field,
+ * creating that set if it does not yet exist.
+ */
+ if (clause.presence == lunr.Query.presence.PROHIBITED) {
+ if (prohibitedMatches[field] === undefined) {
+ prohibitedMatches[field] = lunr.Set.empty
+ }
+
+ prohibitedMatches[field] = prohibitedMatches[field].union(matchingDocumentsSet)
+
+ /*
+ * Prohibited matches should not be part of the query vector used for
+ * similarity scoring and no metadata should be extracted so we continue
+ * to the next field
+ */
+ continue
+ }
+
+ /*
+ * The query field vector is populated using the termIndex found for
+ * the term and a unit value with the appropriate boost applied.
+ * Using upsert because there could already be an entry in the vector
+ * for the term we are working with. In that case we just add the scores
+ * together.
+ */
+ queryVectors[field].upsert(termIndex, clause.boost, function (a, b) { return a + b })
+
+ /**
+ * If we've already seen this term, field combo then we've already collected
+ * the matching documents and metadata, no need to go through all that again
+ */
+ if (termFieldCache[termField]) {
+ continue
+ }
+
+ for (var l = 0; l < matchingDocumentRefs.length; l++) {
+ /*
+ * All metadata for this term/field/document triple
+ * are then extracted and collected into an instance
+ * of lunr.MatchData ready to be returned in the query
+ * results
+ */
+ var matchingDocumentRef = matchingDocumentRefs[l],
+ matchingFieldRef = new lunr.FieldRef(matchingDocumentRef, field),
+ metadata = fieldPosting[matchingDocumentRef],
+ fieldMatch
+
+ if ((fieldMatch = matchingFields[matchingFieldRef]) === undefined) {
+ matchingFields[matchingFieldRef] = new lunr.MatchData(expandedTerm, field, metadata)
+ } else {
+ fieldMatch.add(expandedTerm, field, metadata)
+ }
+
+ }
+
+ termFieldCache[termField] = true
+ }
+ }
+ }
+
+ /**
+ * If the presence was required we need to update the requiredMatches field sets.
+ * We do this after all fields for the term have collected their matches because
+ * the clause terms presence is required in _any_ of the fields not _all_ of the
+ * fields.
+ */
+ if (clause.presence === lunr.Query.presence.REQUIRED) {
+ for (var k = 0; k < clause.fields.length; k++) {
+ var field = clause.fields[k]
+ requiredMatches[field] = requiredMatches[field].intersect(clauseMatches)
+ }
+ }
+ }
+
+ /**
+ * Need to combine the field scoped required and prohibited
+ * matching documents into a global set of required and prohibited
+ * matches
+ */
+ var allRequiredMatches = lunr.Set.complete,
+ allProhibitedMatches = lunr.Set.empty
+
+ for (var i = 0; i < this.fields.length; i++) {
+ var field = this.fields[i]
+
+ if (requiredMatches[field]) {
+ allRequiredMatches = allRequiredMatches.intersect(requiredMatches[field])
+ }
+
+ if (prohibitedMatches[field]) {
+ allProhibitedMatches = allProhibitedMatches.union(prohibitedMatches[field])
+ }
+ }
+
+ var matchingFieldRefs = Object.keys(matchingFields),
+ results = [],
+ matches = Object.create(null)
+
+ /*
+ * If the query is negated (contains only prohibited terms)
+ * we need to get _all_ fieldRefs currently existing in the
+ * index. This is only done when we know that the query is
+ * entirely prohibited terms to avoid any cost of getting all
+ * fieldRefs unnecessarily.
+ *
+ * Additionally, blank MatchData must be created to correctly
+ * populate the results.
+ */
+ if (query.isNegated()) {
+ matchingFieldRefs = Object.keys(this.fieldVectors)
+
+ for (var i = 0; i < matchingFieldRefs.length; i++) {
+ var matchingFieldRef = matchingFieldRefs[i]
+ var fieldRef = lunr.FieldRef.fromString(matchingFieldRef)
+ matchingFields[matchingFieldRef] = new lunr.MatchData
+ }
+ }
+
+ for (var i = 0; i < matchingFieldRefs.length; i++) {
+ /*
+ * Currently we have document fields that match the query, but we
+ * need to return documents. The matchData and scores are combined
+ * from multiple fields belonging to the same document.
+ *
+ * Scores are calculated by field, using the query vectors created
+ * above, and combined into a final document score using addition.
+ */
+ var fieldRef = lunr.FieldRef.fromString(matchingFieldRefs[i]),
+ docRef = fieldRef.docRef
+
+ if (!allRequiredMatches.contains(docRef)) {
+ continue
+ }
+
+ if (allProhibitedMatches.contains(docRef)) {
+ continue
+ }
+
+ var fieldVector = this.fieldVectors[fieldRef],
+ score = queryVectors[fieldRef.fieldName].similarity(fieldVector),
+ docMatch
+
+ if ((docMatch = matches[docRef]) !== undefined) {
+ docMatch.score += score
+ docMatch.matchData.combine(matchingFields[fieldRef])
+ } else {
+ var match = {
+ ref: docRef,
+ score: score,
+ matchData: matchingFields[fieldRef]
+ }
+ matches[docRef] = match
+ results.push(match)
+ }
+ }
+
+ /*
+ * Sort the results objects by score, highest first.
+ */
+ return results.sort(function (a, b) {
+ return b.score - a.score
+ })
+ }
+
+ /**
+ * Prepares the index for JSON serialization.
+ *
+ * The schema for this JSON blob will be described in a
+ * separate JSON schema file.
+ *
+ * @returns {Object}
+ */
+ lunr.Index.prototype.toJSON = function () {
+ var invertedIndex = Object.keys(this.invertedIndex)
+ .sort()
+ .map(function (term) {
+ return [term, this.invertedIndex[term]]
+ }, this)
+
+ var fieldVectors = Object.keys(this.fieldVectors)
+ .map(function (ref) {
+ return [ref, this.fieldVectors[ref].toJSON()]
+ }, this)
+
+ return {
+ version: lunr.version,
+ fields: this.fields,
+ fieldVectors: fieldVectors,
+ invertedIndex: invertedIndex,
+ pipeline: this.pipeline.toJSON()
+ }
+ }
+
+ /**
+ * Loads a previously serialized lunr.Index
+ *
+ * Reconstructs the field vectors, inverted index, token set and
+ * pipeline from the JSON produced by lunr.Index.prototype.toJSON.
+ *
+ * @param {Object} serializedIndex - A previously serialized lunr.Index
+ * @returns {lunr.Index}
+ */
+ lunr.Index.load = function (serializedIndex) {
+ var attrs = {},
+ fieldVectors = {},
+ serializedVectors = serializedIndex.fieldVectors,
+ invertedIndex = Object.create(null),
+ serializedInvertedIndex = serializedIndex.invertedIndex,
+ tokenSetBuilder = new lunr.TokenSet.Builder,
+ pipeline = lunr.Pipeline.load(serializedIndex.pipeline)
+
+ // A version mismatch only warns; loading proceeds regardless.
+ if (serializedIndex.version != lunr.version) {
+ lunr.utils.warn("Version mismatch when loading serialised index. Current version of lunr '" + lunr.version + "' does not match serialized index '" + serializedIndex.version + "'")
+ }
+
+ // Rebuild each field vector from its [ref, elements] tuple.
+ for (var i = 0; i < serializedVectors.length; i++) {
+ var tuple = serializedVectors[i],
+ ref = tuple[0],
+ elements = tuple[1]
+
+ fieldVectors[ref] = new lunr.Vector(elements)
+ }
+
+ // Rebuild the inverted index and, at the same time, the token set.
+ // Terms were serialized in sorted order (see toJSON), which the
+ // token set builder relies on for incremental insertion.
+ for (var i = 0; i < serializedInvertedIndex.length; i++) {
+ var tuple = serializedInvertedIndex[i],
+ term = tuple[0],
+ posting = tuple[1]
+
+ tokenSetBuilder.insert(term)
+ invertedIndex[term] = posting
+ }
+
+ tokenSetBuilder.finish()
+
+ attrs.fields = serializedIndex.fields
+
+ attrs.fieldVectors = fieldVectors
+ attrs.invertedIndex = invertedIndex
+ attrs.tokenSet = tokenSetBuilder.root
+ attrs.pipeline = pipeline
+
+ return new lunr.Index(attrs)
+ }
+ /*!
+ * lunr.Builder
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+ /**
+ * lunr.Builder performs indexing on a set of documents and
+ * returns instances of lunr.Index ready for querying.
+ *
+ * All configuration of the index is done via the builder, the
+ * fields to index, the document reference, the text processing
+ * pipeline and document scoring parameters are all set on the
+ * builder before indexing.
+ *
+ * @constructor
+ * @property {string} _ref - Internal reference to the document reference field.
+ * @property {string[]} _fields - Internal reference to the document fields to index.
+ * @property {object} _documents - Internal map of document refs to document-level attributes (e.g. boost) recorded by add.
+ * @property {object} invertedIndex - The inverted index maps terms to document fields.
+ * @property {object} fieldTermFrequencies - Keeps track of term frequencies per document field (keyed by lunr.FieldRef).
+ * @property {object} fieldLengths - Keeps track of the length (token count) of each indexed field (keyed by lunr.FieldRef).
+ * @property {lunr.tokenizer} tokenizer - Function for splitting strings into tokens for indexing.
+ * @property {lunr.Pipeline} pipeline - The pipeline performs text processing on tokens before indexing.
+ * @property {lunr.Pipeline} searchPipeline - A pipeline for processing search terms before querying the index.
+ * @property {number} documentCount - Keeps track of the total number of documents indexed.
+ * @property {number} _b - A parameter to control field length normalization, setting this to 0 disabled normalization, 1 fully normalizes field lengths, the default value is 0.75.
+ * @property {number} _k1 - A parameter to control how quickly an increase in term frequency results in term frequency saturation, the default value is 1.2.
+ * @property {number} termIndex - A counter incremented for each unique term, used to identify a terms position in the vector space.
+ * @property {array} metadataWhitelist - A list of metadata keys that have been whitelisted for entry in the index.
+ */
+ lunr.Builder = function () {
+ this._ref = "id"
+ // Null-prototype objects are used as maps to avoid collisions with
+ // inherited Object.prototype keys (e.g. a field named "constructor").
+ this._fields = Object.create(null)
+ this._documents = Object.create(null)
+ this.invertedIndex = Object.create(null)
+ this.fieldTermFrequencies = {}
+ this.fieldLengths = {}
+ this.tokenizer = lunr.tokenizer
+ this.pipeline = new lunr.Pipeline
+ this.searchPipeline = new lunr.Pipeline
+ this.documentCount = 0
+ this._b = 0.75
+ this._k1 = 1.2
+ this.termIndex = 0
+ this.metadataWhitelist = []
+ }
+
+ /**
+ * Sets the document field used as the document reference. Every document must have this field.
+ * The type of this field in the document should be a string, if it is not a string it will be
+ * coerced into a string by calling toString.
+ *
+ * The default ref is 'id'.
+ *
+ * The ref should _not_ be changed during indexing, it should be set before any documents are
+ * added to the index. Changing it during indexing can lead to inconsistent results.
+ *
+ * @param {string} ref - The name of the reference field in the document.
+ */
+ lunr.Builder.prototype.ref = function (ref) {
+ // Stored internally; read by Builder#add as doc[this._ref].
+ this._ref = ref
+ }
+
+ /**
+ * A function that is used to extract a field from a document.
+ *
+ * Lunr expects a field to be at the top level of a document, if however the field
+ * is deeply nested within a document an extractor function can be used to extract
+ * the right field for indexing.
+ *
+ * @callback fieldExtractor
+ * @param {object} doc - The document being added to the index.
+ * @returns {?(string|object|object[])} obj - The object that will be indexed for this field.
+ * @example <caption>Extracting a nested field</caption>
+ * function (doc) { return doc.nested.field }
+ */
+
+ /**
+ * Adds a field to the list of document fields that will be indexed. Every document being
+ * indexed should have this field. Null values for this field in indexed documents will
+ * not cause errors but will limit the chance of that document being retrieved by searches.
+ *
+ * All fields should be added before adding documents to the index. Adding fields after
+ * a document has been indexed will have no effect on already indexed documents.
+ *
+ * Fields can be boosted at build time. This allows terms within that field to have more
+ * importance when ranking search results. Use a field boost to specify that matches within
+ * one field are more important than other fields.
+ *
+ * @param {string} fieldName - The name of a field to index in all documents.
+ * @param {object} attributes - Optional attributes associated with this field.
+ * @param {number} [attributes.boost=1] - Boost applied to all terms within this field.
+ * @param {fieldExtractor} [attributes.extractor] - Function to extract a field from a document.
+ * @throws {RangeError} fieldName cannot contain unsupported characters '/'
+ */
+ lunr.Builder.prototype.field = function (fieldName, attributes) {
+ // '/' is rejected — presumably because lunr.FieldRef uses it as the
+ // docRef/fieldName separator in its string form; confirm against FieldRef.
+ if (/\//.test(fieldName)) {
+ throw new RangeError("Field '" + fieldName + "' contains illegal character '/'")
+ }
+
+ // Re-adding an existing field name silently replaces its attributes.
+ this._fields[fieldName] = attributes || {}
+ }
+
+ /**
+ * A parameter to tune the amount of field length normalisation that is applied when
+ * calculating relevance scores. A value of 0 will completely disable any normalisation
+ * and a value of 1 will fully normalise field lengths. The default is 0.75. Values of b
+ * will be clamped to the range 0 - 1.
+ *
+ * @param {number} number - The value to set for this tuning parameter.
+ */
+ lunr.Builder.prototype.b = function (number) {
+ // Clamp to [0, 1]; out-of-range values never throw.
+ if (number < 0) {
+ this._b = 0
+ } else if (number > 1) {
+ this._b = 1
+ } else {
+ this._b = number
+ }
+ }
+
+ /**
+ * A parameter that controls the speed at which a rise in term frequency results in term
+ * frequency saturation. The default value is 1.2. Setting this to a higher value will give
+ * slower saturation levels, a lower value will result in quicker saturation.
+ *
+ * @param {number} number - The value to set for this tuning parameter.
+ */
+ lunr.Builder.prototype.k1 = function (number) {
+ // Unlike Builder#b, the value is stored as-is with no clamping.
+ this._k1 = number
+ }
+
+ /**
+ * Adds a document to the index.
+ *
+ * Before adding fields to the index the index should have been fully setup, with the document
+ * ref and all fields to index already having been specified.
+ *
+ * The document must have a field name as specified by the ref (by default this is 'id') and
+ * it should have all fields defined for indexing, though null or undefined values will not
+ * cause errors.
+ *
+ * Entire documents can be boosted at build time. Applying a boost to a document indicates that
+ * this document should rank higher in search results than other documents.
+ *
+ * @param {object} doc - The document to add to the index.
+ * @param {object} attributes - Optional attributes associated with this document.
+ * @param {number} [attributes.boost=1] - Boost applied to all terms within this document.
+ */
+ lunr.Builder.prototype.add = function (doc, attributes) {
+ var docRef = doc[this._ref],
+ fields = Object.keys(this._fields)
+
+ // NOTE(review): adding the same docRef twice overwrites its per-field
+ // data below, but documentCount is still incremented again.
+ this._documents[docRef] = attributes || {}
+ this.documentCount += 1
+
+ for (var i = 0; i < fields.length; i++) {
+ var fieldName = fields[i],
+ extractor = this._fields[fieldName].extractor,
+ field = extractor ? extractor(doc) : doc[fieldName],
+ tokens = this.tokenizer(field, {
+ fields: [fieldName]
+ }),
+ terms = this.pipeline.run(tokens),
+ fieldRef = new lunr.FieldRef(docRef, fieldName),
+ fieldTerms = Object.create(null)
+
+ // fieldRef is an object; it is coerced to its string form when
+ // used as a key in these plain-object maps.
+ this.fieldTermFrequencies[fieldRef] = fieldTerms
+ this.fieldLengths[fieldRef] = 0
+
+ // store the length of this field for this document
+ this.fieldLengths[fieldRef] += terms.length
+
+ // calculate term frequencies for this field
+ for (var j = 0; j < terms.length; j++) {
+ // each term is a pipeline token (it carries .metadata, read below)
+ // whose string form is used as the map key.
+ var term = terms[j]
+
+ if (fieldTerms[term] == undefined) {
+ fieldTerms[term] = 0
+ }
+
+ fieldTerms[term] += 1
+
+ // add to inverted index
+ // create an initial posting if one doesn't exist
+ if (this.invertedIndex[term] == undefined) {
+ var posting = Object.create(null)
+ // _index records the term's position in the vector space.
+ posting["_index"] = this.termIndex
+ this.termIndex += 1
+
+ for (var k = 0; k < fields.length; k++) {
+ posting[fields[k]] = Object.create(null)
+ }
+
+ this.invertedIndex[term] = posting
+ }
+
+ // add an entry for this term/fieldName/docRef to the invertedIndex
+ if (this.invertedIndex[term][fieldName][docRef] == undefined) {
+ this.invertedIndex[term][fieldName][docRef] = Object.create(null)
+ }
+
+ // store all whitelisted metadata about this token in the
+ // inverted index
+ for (var l = 0; l < this.metadataWhitelist.length; l++) {
+ var metadataKey = this.metadataWhitelist[l],
+ metadata = term.metadata[metadataKey]
+
+ if (this.invertedIndex[term][fieldName][docRef][metadataKey] == undefined) {
+ this.invertedIndex[term][fieldName][docRef][metadataKey] = []
+ }
+
+ this.invertedIndex[term][fieldName][docRef][metadataKey].push(metadata)
+ }
+ }
+
+ }
+ }
+
+ /**
+ * Calculates the average document length for this index
+ *
+ * Populates this.averageFieldLength: a map from field name to the mean
+ * token count of that field across the documents that contain it.
+ *
+ * @private
+ */
+ lunr.Builder.prototype.calculateAverageFieldLengths = function () {
+
+ var fieldRefs = Object.keys(this.fieldLengths),
+ numberOfFields = fieldRefs.length,
+ accumulator = {},
+ documentsWithField = {}
+
+ for (var i = 0; i < numberOfFields; i++) {
+ var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]),
+ field = fieldRef.fieldName
+
+ // `x || (x = 0)` lazily initializes a counter slot before use.
+ documentsWithField[field] || (documentsWithField[field] = 0)
+ documentsWithField[field] += 1
+
+ accumulator[field] || (accumulator[field] = 0)
+ // fieldRef (an object) is coerced back to its string key form here.
+ accumulator[field] += this.fieldLengths[fieldRef]
+ }
+
+ var fields = Object.keys(this._fields)
+
+ // Convert totals into per-field averages.
+ // NOTE(review): a field present in no document divides by undefined → NaN.
+ for (var i = 0; i < fields.length; i++) {
+ var fieldName = fields[i]
+ accumulator[fieldName] = accumulator[fieldName] / documentsWithField[fieldName]
+ }
+
+ this.averageFieldLength = accumulator
+ }
+
+ /**
+ * Builds a vector space model of every document using lunr.Vector
+ *
+ * For each document field, produces a lunr.Vector of BM25-style term
+ * scores (using _k1, _b and the averages from calculateAverageFieldLengths),
+ * stored in this.fieldVectors keyed by the field ref string.
+ *
+ * @private
+ */
+ lunr.Builder.prototype.createFieldVectors = function () {
+ var fieldVectors = {},
+ fieldRefs = Object.keys(this.fieldTermFrequencies),
+ fieldRefsLength = fieldRefs.length,
+ // idf depends only on the term, not the field, so it is cached
+ // across all field refs processed below.
+ termIdfCache = Object.create(null)
+
+ for (var i = 0; i < fieldRefsLength; i++) {
+ var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]),
+ fieldName = fieldRef.fieldName,
+ fieldLength = this.fieldLengths[fieldRef],
+ fieldVector = new lunr.Vector,
+ termFrequencies = this.fieldTermFrequencies[fieldRef],
+ terms = Object.keys(termFrequencies),
+ termsLength = terms.length
+
+
+ // Both boosts default to 1 when unset.
+ var fieldBoost = this._fields[fieldName].boost || 1,
+ docBoost = this._documents[fieldRef.docRef].boost || 1
+
+ for (var j = 0; j < termsLength; j++) {
+ var term = terms[j],
+ tf = termFrequencies[term],
+ termIndex = this.invertedIndex[term]._index,
+ idf, score, scoreWithPrecision
+
+ if (termIdfCache[term] === undefined) {
+ idf = lunr.idf(this.invertedIndex[term], this.documentCount)
+ termIdfCache[term] = idf
+ } else {
+ idf = termIdfCache[term]
+ }
+
+ score = idf * ((this._k1 + 1) * tf) / (this._k1 * (1 - this._b + this._b * (fieldLength / this.averageFieldLength[fieldName])) + tf)
+ score *= fieldBoost
+ score *= docBoost
+ scoreWithPrecision = Math.round(score * 1000) / 1000
+ // Converts 1.23456789 to 1.234.
+ // Reducing the precision so that the vectors take up less
+ // space when serialised. Doing it now so that they behave
+ // the same before and after serialisation. Also, this is
+ // the fastest approach to reducing a number's precision in
+ // JavaScript.
+
+ fieldVector.insert(termIndex, scoreWithPrecision)
+ }
+
+ fieldVectors[fieldRef] = fieldVector
+ }
+
+ this.fieldVectors = fieldVectors
+ }
+
+ /**
+ * Creates a token set of all tokens in the index using lunr.TokenSet
+ *
+ * @private
+ */
+ lunr.Builder.prototype.createTokenSet = function () {
+ // Terms are sorted before building the set — the same ordering
+ // discipline used when (de)serializing the index.
+ this.tokenSet = lunr.TokenSet.fromArray(
+ Object.keys(this.invertedIndex).sort()
+ )
+ }
+
+ /**
+ * Builds the index, creating an instance of lunr.Index.
+ *
+ * This completes the indexing process and should only be called
+ * once all documents have been added to the index.
+ *
+ * @returns {lunr.Index}
+ */
+ lunr.Builder.prototype.build = function () {
+ this.calculateAverageFieldLengths()
+ this.createFieldVectors()
+ this.createTokenSet()
+
+ return new lunr.Index({
+ invertedIndex: this.invertedIndex,
+ fieldVectors: this.fieldVectors,
+ tokenSet: this.tokenSet,
+ fields: Object.keys(this._fields),
+ // Deliberately the searchPipeline, not the indexing pipeline:
+ // the built index only processes query terms from here on.
+ pipeline: this.searchPipeline
+ })
+ }
+
+ /**
+ * Applies a plugin to the index builder.
+ *
+ * A plugin is a function that is called with the index builder as its context.
+ * Plugins can be used to customise or extend the behaviour of the index
+ * in some way. A plugin is just a function, that encapsulated the custom
+ * behaviour that should be applied when building the index.
+ *
+ * The plugin function will be called with the index builder as its argument, additional
+ * arguments can also be passed when calling use. The function will be called
+ * with the index builder as its context.
+ *
+ * @param {Function} plugin The plugin to apply.
+ */
+ lunr.Builder.prototype.use = function (fn) {
+ // Effectively fn.call(this, this, ...extraArgs): the builder is both
+ // the call context and the first argument.
+ var args = Array.prototype.slice.call(arguments, 1)
+ args.unshift(this)
+ fn.apply(this, args)
+ }
+ /**
+ * Contains and collects metadata about a matching document.
+ * A single instance of lunr.MatchData is returned as part of every
+ * lunr.Index~Result.
+ *
+ * @constructor
+ * @param {string} term - The term this match data is associated with
+ * @param {string} field - The field in which the term was found
+ * @param {object} metadata - The metadata recorded about this term in this field
+ * @property {object} metadata - A cloned collection of metadata associated with this document.
+ * @see {@link lunr.Index~Result}
+ */
+ lunr.MatchData = function (term, field, metadata) {
+ var clonedMetadata = Object.create(null),
+ metadataKeys = Object.keys(metadata || {})
+
+ // Cloning the metadata to prevent the original
+ // being mutated during match data combination.
+ // Metadata is kept in an array within the inverted
+ // index so cloning the data can be done with
+ // Array#slice
+ for (var i = 0; i < metadataKeys.length; i++) {
+ var key = metadataKeys[i]
+ clonedMetadata[key] = metadata[key].slice()
+ }
+
+ this.metadata = Object.create(null)
+
+ // All arguments are optional: calling with no term yields an empty
+ // MatchData that can later be filled via combine/add.
+ if (term !== undefined) {
+ this.metadata[term] = Object.create(null)
+ this.metadata[term][field] = clonedMetadata
+ }
+ }
+
+ /**
+ * An instance of lunr.MatchData will be created for every term that matches a
+ * document. However only one instance is required in a lunr.Index~Result. This
+ * method combines metadata from another instance of lunr.MatchData with this
+ * objects metadata.
+ *
+ * Mutates this instance in place; otherMatchData is not modified.
+ *
+ * @param {lunr.MatchData} otherMatchData - Another instance of match data to merge with this one.
+ * @see {@link lunr.Index~Result}
+ */
+ lunr.MatchData.prototype.combine = function (otherMatchData) {
+ var terms = Object.keys(otherMatchData.metadata)
+
+ for (var i = 0; i < terms.length; i++) {
+ var term = terms[i],
+ fields = Object.keys(otherMatchData.metadata[term])
+
+ if (this.metadata[term] == undefined) {
+ this.metadata[term] = Object.create(null)
+ }
+
+ for (var j = 0; j < fields.length; j++) {
+ var field = fields[j],
+ keys = Object.keys(otherMatchData.metadata[term][field])
+
+ if (this.metadata[term][field] == undefined) {
+ this.metadata[term][field] = Object.create(null)
+ }
+
+ for (var k = 0; k < keys.length; k++) {
+ var key = keys[k]
+
+ if (this.metadata[term][field][key] == undefined) {
+ // New key: the other instance's array is adopted by reference
+ // (no copy); existing keys get a fresh array via concat below.
+ this.metadata[term][field][key] = otherMatchData.metadata[term][field][key]
+ } else {
+ this.metadata[term][field][key] = this.metadata[term][field][key].concat(otherMatchData.metadata[term][field][key])
+ }
+
+ }
+ }
+ }
+ }
+
+ /**
+ * Add metadata for a term/field pair to this instance of match data.
+ *
+ * @param {string} term - The term this match data is associated with
+ * @param {string} field - The field in which the term was found
+ * @param {object} metadata - The metadata recorded about this term in this field
+ */
+ lunr.MatchData.prototype.add = function (term, field, metadata) {
+ // Unseen term: store the metadata object by reference (no cloning,
+ // unlike the constructor) and return early.
+ if (!(term in this.metadata)) {
+ this.metadata[term] = Object.create(null)
+ this.metadata[term][field] = metadata
+ return
+ }
+
+ // Known term, unseen field: same by-reference shortcut.
+ if (!(field in this.metadata[term])) {
+ this.metadata[term][field] = metadata
+ return
+ }
+
+ // Known term and field: merge key by key, concatenating arrays for
+ // keys already present.
+ var metadataKeys = Object.keys(metadata)
+
+ for (var i = 0; i < metadataKeys.length; i++) {
+ var key = metadataKeys[i]
+
+ if (key in this.metadata[term][field]) {
+ this.metadata[term][field][key] = this.metadata[term][field][key].concat(metadata[key])
+ } else {
+ this.metadata[term][field][key] = metadata[key]
+ }
+ }
+ }
+ /**
+ * A lunr.Query provides a programmatic way of defining queries to be performed
+ * against a {@link lunr.Index}.
+ *
+ * Prefer constructing a lunr.Query using the {@link lunr.Index#query} method
+ * so the query object is pre-initialized with the right index fields.
+ *
+ * @constructor
+ * @param {string[]} allFields - All field names available in the index being queried.
+ * @property {lunr.Query~Clause[]} clauses - An array of query clauses.
+ * @property {string[]} allFields - An array of all available fields in a lunr.Index.
+ */
+ lunr.Query = function (allFields) {
+ this.clauses = []
+ // Kept by reference; used as the default clause.fields in Query#clause.
+ this.allFields = allFields
+ }
+
+ /**
+ * Constants for indicating what kind of automatic wildcard insertion will be used when constructing a query clause.
+ *
+ * This allows wildcards to be added to the beginning and end of a term without having to manually do any string
+ * concatenation.
+ *
+ * The wildcard constants can be bitwise combined to select both leading and trailing wildcards.
+ *
+ * @constant
+ * @default
+ * @property {number} wildcard.NONE - The term will have no wildcards inserted, this is the default behaviour
+ * @property {number} wildcard.LEADING - Prepend the term with a wildcard, unless a leading wildcard already exists
+ * @property {number} wildcard.TRAILING - Append a wildcard to the term, unless a trailing wildcard already exists
+ * @see lunr.Query~Clause
+ * @see lunr.Query#clause
+ * @see lunr.Query#term
+ * @example
+ * query.term('foo', {
+ * wildcard: lunr.Query.wildcard.LEADING | lunr.Query.wildcard.TRAILING
+ * })
+ */
+
+ // A String *object* (not a primitive) so it can double as the wildcard
+ // character "*" in comparisons and carry the bit-flag properties below.
+ lunr.Query.wildcard = new String("*")
+ lunr.Query.wildcard.NONE = 0
+ lunr.Query.wildcard.LEADING = 1
+ lunr.Query.wildcard.TRAILING = 2
+
+ /**
+ * Constants for indicating what kind of presence a term must have in matching documents.
+ *
+ * @constant
+ * @enum {number}
+ * @see lunr.Query~Clause
+ * @see lunr.Query#clause
+ * @see lunr.Query#term
+ * @example <caption>query term with required presence</caption>
+ * query.term('foo', { presence: lunr.Query.presence.REQUIRED })
+ */
+ lunr.Query.presence = {
+ /**
+ * Term's presence in a document is optional, this is the default value.
+ */
+ OPTIONAL: 1,
+
+ /**
+ * Term's presence in a document is required, documents that do not contain
+ * this term will not be returned.
+ */
+ REQUIRED: 2,
+
+ /**
+ * Term's presence in a document is prohibited, documents that do contain
+ * this term will not be returned.
+ */
+ PROHIBITED: 3
+ }
+
+ /**
+ * A single clause in a {@link lunr.Query} contains a term and details on how to
+ * match that term against a {@link lunr.Index}.
+ *
+ * @typedef {Object} lunr.Query~Clause
+ * @property {string[]} fields - The fields in an index this clause should be matched against.
+ * @property {number} [boost=1] - Any boost that should be applied when matching this clause.
+ * @property {number} [editDistance] - Whether the term should have fuzzy matching applied, and how fuzzy the match should be.
+ * @property {boolean} [usePipeline] - Whether the term should be passed through the search pipeline.
+ * @property {number} [wildcard=lunr.Query.wildcard.NONE] - Whether the term should have wildcards appended or prepended.
+ * @property {number} [presence=lunr.Query.presence.OPTIONAL] - The terms presence in any matching documents.
+ */
+
+ /**
+ * Adds a {@link lunr.Query~Clause} to this query.
+ *
+ * Unless the clause contains the fields to be matched all fields will be matched. In addition
+ * a default boost of 1 is applied to the clause.
+ *
+ * The clause object is mutated in place (defaults filled in, wildcards
+ * applied to clause.term) before being appended to this.clauses.
+ *
+ * @param {lunr.Query~Clause} clause - The clause to add to this query.
+ * @see lunr.Query~Clause
+ * @returns {lunr.Query} this query, for chaining.
+ */
+ lunr.Query.prototype.clause = function (clause) {
+ // Fill in defaults for any options the caller omitted.
+ if (!('fields' in clause)) {
+ clause.fields = this.allFields
+ }
+
+ if (!('boost' in clause)) {
+ clause.boost = 1
+ }
+
+ if (!('usePipeline' in clause)) {
+ clause.usePipeline = true
+ }
+
+ if (!('wildcard' in clause)) {
+ clause.wildcard = lunr.Query.wildcard.NONE
+ }
+
+ // Bitwise flag checks; the charAt/slice guards avoid doubling up a
+ // wildcard the caller already wrote into the term.
+ if ((clause.wildcard & lunr.Query.wildcard.LEADING) && (clause.term.charAt(0) != lunr.Query.wildcard)) {
+ clause.term = "*" + clause.term
+ }
+
+ if ((clause.wildcard & lunr.Query.wildcard.TRAILING) && (clause.term.slice(-1) != lunr.Query.wildcard)) {
+ clause.term = "" + clause.term + "*"
+ }
+
+ if (!('presence' in clause)) {
+ clause.presence = lunr.Query.presence.OPTIONAL
+ }
+
+ this.clauses.push(clause)
+
+ return this
+ }
+
+ /**
+ * A negated query is one in which every clause has a presence of
+ * prohibited. These queries require some special processing to return
+ * the expected results.
+ *
+ * Note: a query with no clauses is vacuously negated (returns true).
+ *
+ * @returns boolean
+ */
+ lunr.Query.prototype.isNegated = function () {
+ for (var i = 0; i < this.clauses.length; i++) {
+ if (this.clauses[i].presence != lunr.Query.presence.PROHIBITED) {
+ return false
+ }
+ }
+
+ return true
+ }
+
+ /**
+ * Adds a term to the current query, under the covers this will create a {@link lunr.Query~Clause}
+ * to the list of clauses that make up this query.
+ *
+ * The term is used as is, i.e. no tokenization will be performed by this method. Instead conversion
+ * to a token or token-like string should be done before calling this method.
+ *
+ * The term will be converted to a string by calling `toString`. Multiple terms can be passed as an
+ * array, each term in the array will share the same options.
+ *
+ * @param {object|object[]} term - The term(s) to add to the query.
+ * @param {object} [options] - Any additional properties to add to the query clause.
+ * @returns {lunr.Query}
+ * @see lunr.Query#clause
+ * @see lunr.Query~Clause
+ * @example <caption>adding a single term to a query</caption>
+ * query.term("foo")
+ * @example <caption>adding a single term to a query and specifying search fields, term boost and automatic trailing wildcard</caption>