diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 000000000..352dcca68 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,24 @@ +// For format details, see https://aka.ms/devcontainer.json. +{ + "name": "Arcane Algorithm Archive All Languages", + + // Comment out 'image' and uncomment 'build' to test changes to the dockerfile locally + "image": "ghcr.io/algorithm-archivists/aaa-langs:latest", + // "build": { + // // For config options, see the README at: https://github.com/microsoft/vscode-dev-containers/tree/v0.187.0/containers/ubuntu + // "dockerfile": "../Dockerfile", + // // Update 'VARIANT' to pick an Ubuntu version: focal, bionic + // "args": { "VARIANT": "focal" } + // }, + + // Set *default* container specific settings.json values on container create. + "settings": {}, + + // Add the IDs of extensions you want installed when the container is created. + "extensions": [], + + // Use 'forwardPorts' to make a list of ports inside the container available locally (outside the container). 
+ // Port : Usage + // 4000 : Honkit serves by default on this port + "forwardPorts": [4000] +} diff --git a/.editorconfig b/.editorconfig index d36f18254..a3fbd6e02 100644 --- a/.editorconfig +++ b/.editorconfig @@ -135,8 +135,8 @@ indent_size = 4 indent_style = space indent_size = 4 -# Racket -[*.rkt] +# Racket/Scheme +[*.{rkt,ss,scm}] indent_style = space indent_size = 2 @@ -150,6 +150,10 @@ indent_size = 4 indent_style = space indent_size = 4 +# V +[*.v] +indent_style = tab + # Whitespace [*.ws] indent_style = space @@ -157,3 +161,17 @@ indent_size = 0 trim_trailing_whitespace = false insert_final_newline = false end_of_line = lf + +# PowerShell +[*.ps1] +indent_style = space +indent_size = 4 + +# CoffeeScript +[*.coffee] +indent_style = space +indent_size = 2 + +[*.coco] +indent_style = space +indent_size = 4 diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 000000000..ebc72065b --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,12 @@ +# These are supported funding model platforms + +github: leios +patreon: # Replace with a single Patreon username +open_collective: # Replace with a single Open Collective username +ko_fi: # Replace with a single Ko-fi username +tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel +community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +liberapay: # Replace with a single Liberapay username +issuehunt: # Replace with a single IssueHunt username +otechie: # Replace with a single Otechie username +custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] diff --git a/.github/PULL_REQUEST_TEMPLATE/CHAPTER_SUBMISSION_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE/CHAPTER_SUBMISSION_TEMPLATE.md index 5744ba2da..b4e2edb81 100644 --- a/.github/PULL_REQUEST_TEMPLATE/CHAPTER_SUBMISSION_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE/CHAPTER_SUBMISSION_TEMPLATE.md @@ -11,7 +11,7 @@ Before continuing, please be sure you 
have read the [How to contribute guide for 1. discussed the chapter with James Schloss (Leios) first and know where this chapter fits into the broader scope of the project 2. edited the Markdown file with the appropriate line numbers for your submission -3. built the algorithm archive with `gitbook install && gitbook serve` to make sure your code can be seen on your branch +3. built the algorithm archive with `npm install && npm run serve` to make sure your code can be seen on your branch 4. followed all necessary [style guidelines](https://github.com/algorithm-archivists/algorithm-archive/wiki/Code-style-guide) for the initial submission language, if available 5. agreed to share your chapter under [CC BY-SA](https://creativecommons.org/licenses/by-sa/4.0/deed.en) diff --git a/.github/PULL_REQUEST_TEMPLATE/CODE_SUBMISSION_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE/CODE_SUBMISSION_TEMPLATE.md index 215498446..866d4af8b 100644 --- a/.github/PULL_REQUEST_TEMPLATE/CODE_SUBMISSION_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE/CODE_SUBMISSION_TEMPLATE.md @@ -9,7 +9,7 @@ Thanks for submitting code to the Algorithm Archive! Before continuing, please be sure you have read the [How to contribute guide for code submission](https://github.com/algorithm-archivists/algorithm-archive/wiki/How-to-Contribute#step-3---submitting-code) and have: 1. edited the Markdown file with the appropriate line numbers for your submission -2. built the Algorithm Archive with `gitbook install && gitbook serve` to make sure your code can be seen on your branch +2. built the Algorithm Archive with `npm install && npm run serve` to make sure your code can be seen on your branch 3. 
followed all necessary [style guidelines](https://github.com/algorithm-archivists/algorithm-archive/wiki/Code-style-guide) for your language, if available If you would like to contact us, we are also available on Discord at https://discord.gg/2PEjsR diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 000000000..cd2038090 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,29 @@ +name: build +on: pull_request + +jobs: + build: + runs-on: ubuntu-latest + container: + options: --entrypoint /bin/bash --user 0 + image: ghcr.io/algorithm-archivists/aaa-langs:latest + defaults: + run: + shell: bash --rcfile /root/.bashrc -eo pipefail {0} + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + persist-credentials: false + + - name: Install and build + run: | + npm install + npx honkit build + + - name: Initalize cargo and run SCons + env: + HOME: /root + run: | + . "$HOME/.cargo/env" + scons -Q --random -j2 diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml new file mode 100644 index 000000000..e07b1fade --- /dev/null +++ b/.github/workflows/deploy.yml @@ -0,0 +1,26 @@ +name: Build and Deploy +on: + push: + branches: + - main + +jobs: + build-and-deploy: + runs-on: ubuntu-latest + if: github.repository == 'algorithm-archivists/algorithm-archive' + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + persist-credentials: false + + - name: Install and build + run: | + npm install + npx honkit build + + - name: Deploy + uses: JamesIves/github-pages-deploy-action@4.1.4 + with: + branch: gh-pages + folder: _book diff --git a/.github/workflows/issue_comment.yml b/.github/workflows/issue_comment.yml new file mode 100644 index 000000000..e7d87c53d --- /dev/null +++ b/.github/workflows/issue_comment.yml @@ -0,0 +1,19 @@ +name: Add Label with Comment + +on: [issue_comment] + +jobs: + create_comment: + runs-on: ubuntu-latest + steps: + - uses: actions-ecosystem/action-regex-match@v2 + id: 
regex-match + with: + text: ${{ github.event.comment.body }} + regex: '\[lang:\s*(.*?)\s*\]' + + - uses: actions-ecosystem/action-add-labels@v1 + if: ${{ steps.regex-match.outputs.match != '' }} + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + labels: "lang: ${{ steps.regex-match.outputs.group1 }}" diff --git a/.github/workflows/publish_container.yml b/.github/workflows/publish_container.yml new file mode 100644 index 000000000..e49663542 --- /dev/null +++ b/.github/workflows/publish_container.yml @@ -0,0 +1,18 @@ +name: Publish Docker +on: + push: + branches: + - main +jobs: + build: + runs-on: ubuntu-latest + if: github.repository == 'algorithm-archivists/algorithm-archive' + steps: + - uses: actions/checkout@master + - name: Publish to Registry + uses: elgohr/Publish-Docker-Github-Action@v5 + with: + name: algorithm-archivists/aaa-langs + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + registry: ghcr.io diff --git a/.gitignore b/.gitignore index 09ad4e177..08c567100 100644 --- a/.gitignore +++ b/.gitignore @@ -478,7 +478,7 @@ paket-files/ # Python Tools for Visual Studio (PTVS) __pycache__/ *.pyc - +*.ipynb_checkpoints* # Cake - Uncomment if you are using it # tools/** # !tools/packages.config @@ -509,3 +509,29 @@ __pycache__/ # Settings directory for visual studio code vscode/ +# Data file extension for Algorithm Archive +*.dat + +# Coconut compilation files +**/coconut/*.py + +# aspell +*.bak + +# SCons intermidiate files +.sconsign.dblite +*.o + +# SCons build directory +build/ + +# Cargo artifacts +Cargo.lock +target/ + +*.out +*.class + +# OCaml compilation files +*.cmi +*.cmx diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 000000000..e69de29bb diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 9111770c9..000000000 --- a/.travis.yml +++ /dev/null @@ -1,26 +0,0 @@ -dist: trusty -sudo: false - -language: node_js -node_js: - - "4" - -install: - - npm install gitbook-cli -g - - gitbook install - 
-before_script: - - mkdir -p "${TRAVIS_BUILD_DIR}"/build - -script: - - env | sort - - gitbook build . "${TRAVIS_BUILD_DIR}"/build - -after_success: - - | - if [[ "${TRAVIS_BRANCH}" == master && "${TRAVIS_PULL_REQUEST}" == false ]]; then - # Commits to master that are not pull requests, that is, only - # actual addition of code to master, should deploy the book to - # the site. - bash "${TRAVIS_BUILD_DIR}"/tools/deploy/update_site_travis.bash - fi diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index a9d53d836..15c319751 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -42,9 +42,26 @@ This file lists everyone, who contributed to this repo and wanted to show up her - Christopher Milan - Vexatos - Raven-Blue Dragon -- Björn Heinrichs +- Björn Heinrichs - Olav Sundfør - Ben Chislett - dovisutu - Antetokounpo - Akash Dhiman +- Vincent Zalzal +- Jonathan D B Van Schenck +- James Goytia +- Sammy Plat +- Jonathan Dönszelmann +- Ishaan Verma +- Delphi1024 +- ntindle +- Mahdi Sarikhani +- Ridham177 +- Hugo Salou +- Dimitri Belopopsky +- Henrik Abel Christensen +- K. 
Shudipto Amin +- Peanutbutter_Warrior +- Thijs Raymakers +- Michael Ciccotosto-Camp diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..7e8a3826a --- /dev/null +++ b/Dockerfile @@ -0,0 +1,112 @@ +# See here for image contents: https://github.com/microsoft/vscode-dev-containers/tree/v0.187.0/containers/ubuntu/.devcontainer/base.Dockerfile + +ARG DEBIAN_FRONTEND=noninteractive + +# [Choice] Ubuntu version: bionic, focal +ARG VARIANT="focal" +FROM mcr.microsoft.com/vscode/devcontainers/base:0-${VARIANT} + +RUN apt-get update \ + && apt-get -y install --no-install-recommends build-essential software-properties-common xz-utils g++ sbcl julia python3 python3-pip python3-dev ghc openjdk-11-jdk libssl-dev gfortran libxml2-dev libyaml-dev libgmp-dev libz-dev libncurses5 gnuplot nodejs npm lua5.3 ocaml php ruby-full gnu-smalltalk scratch libfftw3-dev cmake mono-devel + +# Setup Crystal +RUN echo 'deb http://download.opensuse.org/repositories/devel:/languages:/crystal/xUbuntu_20.04/ /' | sudo tee /etc/apt/sources.list.d/devel:languages:crystal.list +RUN curl -fsSL https://download.opensuse.org/repositories/devel:languages:crystal/xUbuntu_20.04/Release.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/devel_languages_crystal.gpg > /dev/null + +# Setup Dart +RUN sudo sh -c 'wget -qO- https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -' +RUN sudo sh -c 'wget -qO- https://storage.googleapis.com/download.dartlang.org/linux/debian/dart_stable.list > /etc/apt/sources.list.d/dart_stable.list' + +# Setup Powershell +RUN sudo sh -c 'wget -q https://packages.microsoft.com/config/ubuntu/$(lsb_release -rs)/packages-microsoft-prod.deb -O packages-microsoft-prod.deb' +RUN sudo sh -c 'dpkg -i packages-microsoft-prod.deb' + +# Setup Clojure +RUN sudo sh -c 'curl -O https://download.clojure.org/install/linux-install-1.10.3.967.sh' +RUN sudo sh -c 'chmod +x linux-install-1.10.3.967.sh' +RUN sudo sh -c 'sudo ./linux-install-1.10.3.967.sh' + +# Setup 
dotnet +RUN sudo sh -c 'wget https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb' +RUN sudo sh -c 'sudo dpkg -i packages-microsoft-prod.deb' +RUN sudo sh -c 'rm packages-microsoft-prod.deb' + +# Setup D Lang +ENV DLANG_VERSION=2.097.2 +RUN mkdir -p ~/dlang && wget https://dlang.org/install.sh -O ~/dlang/install.sh +RUN bash ~/dlang/install.sh dmd-$DLANG_VERSION +ENV PATH=$PATH:~/dlang/dmd-$DLANG_VERSION/linux/bin64/ + +# Setup Go +RUN sudo sh -c 'wget -c https://dl.google.com/go/go1.14.2.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local' +ENV PATH=$PATH:/usr/local/go/bin + +# Setup Kotlin +RUN mkdir -p ~/kotlin && wget -c https://github.com/JetBrains/kotlin/releases/download/v1.5.30/kotlin-compiler-1.5.30.zip -O ~/kotlin/kotlinc.zip && cd ~/kotlin && unzip kotlinc.zip +ENV PATH=$PATH:~/kotlin/kotlinc/bin + +# Setup lolcode +RUN git clone https://github.com/justinmeza/lci.git ~/lolcode && cd ~/lolcode && mkdir build && cd build && cmake .. && make -B +ENV PATH=$PATH:~/lolcode/build + +# Setup Piet +RUN python3 -m pip install --no-cache-dir repiet + +# Setup Matlab +# ?????? This is a licensed language??? 
+ +# Setup Emojicode +RUN mkdir -p ~/emojicode && wget -c https://github.com/emojicode/emojicode/releases/download/v1.0-beta.2/Emojicode-1.0-beta.2-Linux-x86_64.tar.gz -O ~/emojicode/emojicode.tar.gz && \ + tar -xzf ~/emojicode/emojicode.tar.gz -C ~/emojicode --strip-components=1 +ENV PATH=$PATH:~/emojicode + +# Setup Factor +RUN mkdir -p ~/factor && wget https://downloads.factorcode.org/releases/0.98/factor-linux-x86-64-0.98.tar.gz -O ~/factor/factor.tar.gz && tar -xzf ~/factor/factor.tar.gz -C ~/factor --strip-components=1 +ENV PATH=$PATH:~/factor/factor + +# Setup R +RUN sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9 +RUN sudo add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/' + +# Setup Racket and Scheme +# To run scheme files, use `racket -f ` +RUN sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys D9D33FCD84D82C17288BA03B3C9A6980F827E01E +RUN sudo add-apt-repository 'deb http://ppa.launchpad.net/plt/racket/ubuntu focal main' + +# Setup Rust +RUN curl https://sh.rustup.rs -sSf | sh -s -- -y + +# Setup Scratch +## using 1.x right now.... in future checkout snap or adobe air? 
+ +# Setup Swift +RUN mkdir -p ~/swift && wget https://swift.org/builds/swift-5.5-release/ubuntu2004/swift-5.5-RELEASE/swift-5.5-RELEASE-ubuntu20.04.tar.gz -O ~/swift/swift.tar.gz && \ + tar -xzf ~/swift/swift.tar.gz -C ~/swift --strip-components=1 +ENV PATH=$PATH:~/swift/usr/bin + +# Setup viml +# To run vim script commands use `/usr/bin/vim -c ":source %" ` +RUN apt-get -y install --no-install-recommends vim + +# Setup whitespace +RUN mkdir -p ~/whitespace && git clone https://github.com/wspace/whitespace-haskell ~/whitespace && cd ~/whitespace && make -B +ENV PATH=$PATH:~/whitespace + +# Setup Elm +RUN mkdir -p ~/elm && curl -L -o ~/elm/elm.gz https://github.com/elm/compiler/releases/download/0.19.1/binary-for-linux-64-bit.gz && \ + gunzip ~/elm/elm.gz && chmod +x ~/elm/elm +ENV PATH=$PATH:~/elm + +# Setup V +RUN mkdir -p ~/vlang && wget https://github.com/vlang/v/releases/download/weekly.2021.44/v_linux.zip -O ~/vlang/vlang.zip && \ + unzip ~/vlang/vlang.zip -d ~/vlang +ENV PATH=$PATH:~/vlang/v + +# Install the packages that needed extra help +RUN apt-get update \ + && apt-get -y install --no-install-recommends crystal dart nim powershell scala dotnet-sdk-5.0 r-base racket + + +RUN python3 -m pip install --no-cache-dir wheel matplotlib numpy coconut scons + +RUN sudo sh -c 'npm install -g typescript' diff --git a/README.md b/README.md index 4371dd00c..ff37394dd 100644 --- a/README.md +++ b/README.md @@ -1,20 +1,20 @@ -> **IMPORTANT NOTE FOR NEW CONTRIBUTORS:** -> -> We do **not** yet accept entirely new chapters by everyone. If you would like to start working on one, please contact James Schloss (Leios) first. If you create a full chapter, including text, and submit it as a pull request it is most likely going to get rejected. -> -> If you want to help, it is best to write language examples for *existing* chapters. You can also try to find spelling or other mistakes in existing chapters and submit fixes for those. 
- # The Arcane Algorithm Archive The Arcane Algorithm Archive is a collaborative effort to create a guide for all important algorithms in all languages. This goal is obviously too ambitious for a book of any size, but it is a great project to learn from and work on and will hopefully become an incredible resource for programmers in the future. +To change the language, please use the UI at the top of the site: + +

+ +

+ Here are some essential links: - Book / website: - GitHub repository: - YouTube channel (LeiosOS): -- Twitch livestream (simuleios): +- Twitch livestream: - Discord server: Note that this project is essentially a book about algorithms collaboratively written by an online community. diff --git a/SConstruct b/SConstruct new file mode 100644 index 000000000..74e05ea85 --- /dev/null +++ b/SConstruct @@ -0,0 +1,141 @@ +""" +SCons top-level build description (SConstruct) for the Arcane Algorithm Achive + +This provides Builder objects for each of the language implementations in the AAA; however, this work cannot be considered exhaustive until every language has been covered. + +Currently, the aim is to provide a way to compile or copy the implementation files to the build directory, as well as to provide ways to run them and capture their output. + +To run the compilation for all implementations in one language, e.g. C, run the command `scons build/c`, and the resulting executables will be available in the `build/c` directory, each in their respective algorithm directory, containing the executable.""" + +from pathlib import Path +from collections import namedtuple +import os + +import SCons +SCons.Warnings.warningAsException() + +# For interpreted languages to copy to build directory +copy_builder = Builder(action=Copy('$TARGET', '$SOURCE')) + +env = Environment(ENV=os.environ, + BUILDERS={'Copier': copy_builder}, + tools=[ + 'g++', 'gas', 'gcc', 'gfortran', 'gnulink', 'javac'], + toolpath=['builders']) + +available_languages = { + 'asm-x64', + 'bash', + 'c', + 'cpp', + 'csharp', + 'fortran', + 'java', + 'julia', + 'lolcode' + 'lua', + 'php', + 'powershell', + 'python', + 'ruby', + 'viml', + 'racket', +} + +languages_to_import = { + 'coconut': ['coconut'], + 'csharp': ['mcs'], + 'go': ['go'], + 'rust': ['rustc', 'cargo'], + 'kotlin': ['kotlin'], + 'racket': ['racket'], +} + +for language, tools in languages_to_import.items(): + for tool in tools: + try: + 
env.Tool(tool) + except SCons.Warnings.SConsWarning as w: + print(f'{w.args[0][0]}, ignoring') + break + else: + available_languages.add(language) + + +Export('env') + +env['CCFLAGS'] = '-Wall -Wextra -Werror -pedantic -Wconversion' +env['CFLAGS'] = '-std=gnu99' +env['CXXFLAGS'] = '-std=c++17 -Wold-style-cast' +env['ASFLAGS'] = '--64' +env['COCONUTFLAGS'] = '--target 3.8' + +# Add other languages here when you want to add language targets +# Put 'name_of_language_directory' : 'file_extension' + +languages = { + 'asm-x64': 's', + 'bash': 'bash', + 'c': 'c', + 'coconut': 'coco', + 'cpp': 'cpp', + 'csharp': 'cs', + 'fortran': 'f90', + 'go': 'go', + 'java': 'java', + 'javascript': 'js', + 'julia': 'jl', + 'kotlin': 'kt', + 'lolcode': 'lol', + 'lua': 'lua', + 'php': 'php', + 'powershell': 'ps1', + 'python': 'py', + 'ruby': 'rb', + 'rust': 'rs', + 'viml': 'vim', + 'racket': 'rkt' +} + +# Do not add new Builders here, add them to the BUILDERS argument in the call to Environment above +env.C = env.Program +env.CPlusPlus = env.Program +env.X64 = env.Program +env.Fortran = env.Program + +for language in available_languages: + Alias(language, f'#/build/{language}') + +sconscripts = [] +files_to_compile = {language: [] for language in languages if language in available_languages} + +FileInformation = namedtuple('FileInformation', ['path', 'chapter', 'language']) + + +contents_path = Path.cwd().joinpath('contents') +for chapter_dir in contents_path.iterdir(): + for code_dir in chapter_dir.glob('**/code'): + # For nested chapters e.g. 
contents/convolutions/1d/ + extended_chapter_path = code_dir.relative_to(contents_path).parent + + for language_dir in code_dir.iterdir(): + if (language := language_dir.stem) in available_languages: + new_files = [FileInformation(path=file_path, + chapter=extended_chapter_path, + language=language) + for file_path in language_dir.glob(f'**/*.{languages[language]}') + ] + # Check for overriding SConscript + if (sconscript_path := language_dir / 'SConscript').exists(): + SConscript(sconscript_path, exports={'files_to_compile': new_files}) + else: + files_to_compile[language].extend(new_files) + +sconscript_dir_path = Path.cwd().joinpath('sconscripts') +for language, files in files_to_compile.items(): + if files: + if (sconscript_path := sconscript_dir_path / f"{language}_SConscript").exists(): + SConscript(sconscript_path, exports = {'files_to_compile': files}) + else: + print(f'{language} file found at {files[0]}, but no sconscript file is present ') + diff --git a/SUMMARY.md b/SUMMARY.md index 53e0ddace..412b7fd12 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -3,18 +3,30 @@ * [Algorithm Archive](README.md) * [Introduction](contents/introduction/introduction.md) * [How To Contribute](contents/how_to_contribute/how_to_contribute.md) + * [Code Reviewers](contents/code_reviews/code_reviewers.md) +* [Plotting](contents/plotting/plotting.md) + * [Domain Coloring](contents/domain_coloring/domain_coloring.md) + * [Iterated Function Systems](contents/IFS/IFS.md) + * [The Barnsley Fern](contents/barnsley/barnsley.md) * [Data Structures](contents/data_structures/data_structures.md) * [Stacks and Queues](contents/stacks_and_queues/stacks_and_queues.md) * [Mathematical Background](contents/mathematical_background/mathematical_background.md) * [Complexity Notation](contents/notation/notation.md) + * [Affine Transformations](contents/affine_transformations/affine_transformations.md) * [Bit Logic](contents/bitlogic/bitlogic.md) * [Taylor 
Series](contents/taylor_series_expansion/taylor_series_expansion.md) -* [Sorting and Searching](contents/sorting_and_searching/sorting_and_searching.md) - * [Bubble Sort](contents/bubble_sort/bubble_sort.md) - * [Bogo Sort](contents/bogo_sort/bogo_sort.md) + * [Convolutions](contents/convolutions/convolutions.md) + * [Convolutions in 1D](contents/convolutions/1d/1d.md) + * [Multiplication as a Convolution](contents/convolutions/multiplication/multiplication.md) + * [Convolutions of Images (2D)](contents/convolutions/2d/2d.md) + * [Convolutional Theorem](contents/convolutions/convolutional_theorem/convolutional_theorem.md) + * [Box Muller Transform](contents/box_muller/box_muller.md) + * [How costly is rejection sampling?](contents/box_muller/box_muller_rejection.md) + * [Probability Distributions](contents/probability_distributions/distributions.md) * [Tree Traversal](contents/tree_traversal/tree_traversal.md) * [Euclidean Algorithm](contents/euclidean_algorithm/euclidean_algorithm.md) * [Monte Carlo](contents/monte_carlo_integration/monte_carlo_integration.md) + * [Metropolis](contents/metropolis/metropolis.md) * [Matrix Methods](contents/matrix_methods/matrix_methods.md) * [Gaussian Elimination](contents/gaussian_elimination/gaussian_elimination.md) * [Thomas Algorithm](contents/thomas_algorithm/thomas_algorithm.md) @@ -25,12 +37,15 @@ * [FFT](contents/cooley_tukey/cooley_tukey.md) * [Decision Problems](contents/decision_problems/decision_problems.md) * [Stable Marriage Problem](contents/stable_marriage_problem/stable_marriage_problem.md) -* [Differential Equation Solvers](contents/differential_equations/differential_equations.md) - * [Forward Euler Method](contents/forward_euler_method/forward_euler_method.md) * [Physics Solvers](contents/physics_solvers/physics_solvers.md) * [Verlet Integration](contents/verlet_integration/verlet_integration.md) * [Quantum Systems](contents/quantum_systems/quantum_systems.md) * [Split-Operator 
Method](contents/split-operator_method/split-operator_method.md) * [Data Compression](contents/data_compression/data_compression.md) * [Huffman Encoding](contents/huffman_encoding/huffman_encoding.md) +* [Computer Graphics](contents/computer_graphics/computer_graphics.md) + * [Flood Fill](contents/flood_fill/flood_fill.md) * [Quantum Information](contents/quantum_information/quantum_information.md) +* [Cryptography](contents/cryptography/cryptography.md) +* [Computus](contents/computus/computus.md) +* [Approximate Counting Algorithm](contents/approximate_counting/approximate_counting.md) diff --git a/book.json b/book.json index 839600e50..690acec87 100644 --- a/book.json +++ b/book.json @@ -1,13 +1,13 @@ { - "gitbook": "3.x.x", + "honkit": ">= 3.0.0", + "root": "./", + "title": "Arcane Algorithm Archive", "plugins": [ - "fontsettings", "mathjax@https://github.com/algorithm-archivists/plugin-mathjax", "bibtex-cite", "wordcount", "api-language-selector@https://github.com/algorithm-archivists/gitbook-plugin-api-language-selector.git", "include-codeblock", - "ga", "bulk-redirect", "prism", "-highlight" @@ -27,9 +27,6 @@ "basepath": "/", "redirectsFile": "redirects.json" }, - "ga": { - "token": "UA-118252470-1" - }, "prism": { "lang": { "asm-x64": "nasm" @@ -192,6 +189,38 @@ { "lang": "ss", "name": "Scheme" + }, + { + "lang": "ps1", + "name": "PowerShell" + }, + { + "lang": "v", + "name": "Vlang" + }, + { + "lang": "coffee", + "name": "CoffeeScript" + }, + { + "lang": "kotlin", + "name": "Kotlin" + }, + { + "lang": "ts", + "name": "TypeScript" + }, + { + "lang": "vim", + "name": "VimL" + }, + { + "lang": "coco", + "name": "Coconut" + }, + { + "lang": "dart", + "name": "Dart" } ] } diff --git a/builders/cargo.py b/builders/cargo.py new file mode 100644 index 000000000..0ac22e086 --- /dev/null +++ b/builders/cargo.py @@ -0,0 +1,41 @@ +from SCons.Builder import Builder +from SCons.Script import Move +import SCons.Util + +class ToolCargoWarning(SCons.Warnings.SConsWarning): 
+ pass + +class CargoNotFound(ToolCargoWarning): + pass + +SCons.Warnings.enableWarningClass(ToolCargoWarning) + +def _detect(env): + try: + return env['cargo'] + except KeyError: + pass + + cargo = env.WhereIs('cargo') + if cargo: + return cargo + + SCons.Warnings.warn(CargoNotFound, 'Could not detect cargo') + +def exists(env): + return env.Detect('cargo') + + +def generate(env): + env['CARGO'] = _detect(env) + env['CARGOFLAGS'] = [] + env['MANIFEST'] = [] + + rust_cargo_builder = Builder( + action=['"$CARGO" build $CARGOFLAGS --bins --manifest-path $MANIFEST', + Move('$TARGET$PROGSUFFIX', + '$SOURCE_DIR/target/debug/main$PROGSUFFIX') + ], + suffix='$PROGSUFFIX', + ) + env.Append(BUILDERS={'cargo': rust_cargo_builder}) diff --git a/builders/coconut.py b/builders/coconut.py new file mode 100644 index 000000000..36498412b --- /dev/null +++ b/builders/coconut.py @@ -0,0 +1,40 @@ +from SCons.Builder import Builder +import SCons.Util + +class ToolCocoWarning(SCons.Warnings.SConsWarning): + pass + +class CoconutNotFound(ToolCocoWarning): + pass + +SCons.Warnings.enableWarningClass(ToolCocoWarning) + +def _detect(env): + try: + return env['coconut'] + except KeyError: + pass + + coconut = env.WhereIs('coconut') + if coconut: + return coconut + + SCons.Warnings.warn(CoconutNotFound, 'Could not find Coconut executable') + + +def generate(env): + env['COCONUT'] = _detect(env) + env['COCONUTFLAGS'] = [] + + coconut_compiler = Builder( + action='"$COCONUT" $COCONUTFLAGS $SOURCE $TARGET', + src_suffix='.coco', + suffix='.py', + ) + + env.Append(BUILDERS={'Coconut': coconut_compiler}) + +def exists(env): + return env.Detect('coconut') + + diff --git a/builders/go.py b/builders/go.py new file mode 100644 index 000000000..261789092 --- /dev/null +++ b/builders/go.py @@ -0,0 +1,37 @@ +from SCons.Builder import Builder +import SCons.Util + +class ToolGoWarning(SCons.Warnings.SConsWarning): + pass + +class GoNotFound(ToolGoWarning): + pass + 
+SCons.Warnings.enableWarningClass(ToolGoWarning) + +def _detect(env): + try: + return env['go'] + except KeyError: + pass + + go = env.WhereIs('go') + if go: + return go + + SCons.Warnings.warn(GoNotFound, 'Could not find go executable') + +def exists(env): + return env.Detect('go') + +def generate(env): + env['GO'] = _detect(env) + env['GOFLAGS'] = [] + + go_builder = Builder( + action='"$GO" build -o $TARGET $GOFLAGS $SOURCE', + src_suffix='.go', + suffix='$PROGSUFFIX', + ) + + env.Append(BUILDERS={'Go': go_builder}) diff --git a/builders/kotlin.py b/builders/kotlin.py new file mode 100644 index 000000000..fc1d9ecb1 --- /dev/null +++ b/builders/kotlin.py @@ -0,0 +1,184 @@ +# MIT License +# +# Copyright The SCons Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +"""SCons.Tool.kotlin +Tool-specific initialization for Kotlin. 
+""" + +import SCons.Action +import SCons.Builder +import SCons.Util + + +class ToolKotlinWarning(SCons.Warnings.SConsWarning): + pass + + +class KotlinNotFound(ToolKotlinWarning): + pass + + +SCons.Warnings.enableWarningClass(ToolKotlinWarning) + + +def _detect(env): + """ Try to detect the kotlinc binary """ + try: + return env["kotlinc"] + except KeyError: + pass + + kotlin = env.Detect("kotlinc") + if kotlin: + return kotlin + + SCons.Warnings.warn(KotlinNotFound, "Could not find kotlinc executable") + + +# +# Builders +# +kotlinc_builder = SCons.Builder.Builder( + action=SCons.Action.Action("$KOTLINCCOM", "$KOTLINCCOMSTR"), + suffix="$KOTLINCLASSSUFFIX", + src_suffix="$KOTLINSUFFIX", + single_source=True, +) # file by file + +kotlin_jar_builder = SCons.Builder.Builder( + action=SCons.Action.Action("$KOTLINJARCOM", "$KOTLINJARCOMSTR"), + suffix="$KOTLINJARSUFFIX", + src_suffix="$KOTLINSUFFIX", + single_source=True, +) # file by file + +kotlin_rtjar_builder = SCons.Builder.Builder( + action=SCons.Action.Action("$KOTLINRTJARCOM", "$KOTLINRTJARCOMSTR"), + suffix="$KOTLINJARSUFFIX", + src_suffix="$KOTLINSUFFIX", + single_source=True, +) # file by file + + +def Kotlin(env, target, source=None, *args, **kw): + """ + A pseudo-Builder wrapper for the kotlinc executable. 
+ kotlinc [options] file + """ + if not SCons.Util.is_List(target): + target = [target] + if not source: + source = target[:] + if not SCons.Util.is_List(source): + source = [source] + + result = [] + kotlinc_suffix = env.subst("$KOTLINCLASSSUFFIX") + kotlinc_extension = env.subst("$KOTLINEXTENSION") + for t, s in zip(target, source): + t_ext = t + if not t.endswith(kotlinc_suffix): + if not t.endswith(kotlinc_extension): + t_ext += kotlinc_extension + + t_ext += kotlinc_suffix + # Ensure that the case of first letter is upper-case + t_ext = t_ext[:1].upper() + t_ext[1:] + # Call builder + kotlin_class = kotlinc_builder.__call__(env, t_ext, s, **kw) + result.extend(kotlin_class) + + return result + + +def KotlinJar(env, target, source=None, *args, **kw): + """ + A pseudo-Builder wrapper for creating JAR files with the kotlinc executable. + kotlinc [options] file -d target + """ + if not SCons.Util.is_List(target): + target = [target] + if not source: + source = target[:] + if not SCons.Util.is_List(source): + source = [source] + + result = [] + for t, s in zip(target, source): + # Call builder + kotlin_jar = kotlin_jar_builder.__call__(env, t, s, **kw) + result.extend(kotlin_jar) + + return result + + +def KotlinRuntimeJar(env, target, source=None, *args, **kw): + """ + A pseudo-Builder wrapper for creating standalone JAR files with the kotlinc executable. 
+ kotlinc [options] file -d target -include-runtime + """ + if not SCons.Util.is_List(target): + target = [target] + if not source: + source = target[:] + if not SCons.Util.is_List(source): + source = [source] + + result = [] + for t, s in zip(target, source): + # Call builder + kotlin_jar = kotlin_rtjar_builder.__call__(env, t, s, **kw) + result.extend(kotlin_jar) + + return result + + +def generate(env): + """Add Builders and construction variables for kotlinc to an Environment.""" + + env["KOTLINC"] = _detect(env) + + env.SetDefault( + KOTLINC="kotlinc", + KOTLINSUFFIX=".kt", + KOTLINEXTENSION="Kt", + KOTLINCLASSSUFFIX=".class", + KOTLINJARSUFFIX=".jar", + KOTLINCFLAGS=SCons.Util.CLVar(), + KOTLINJARFLAGS=SCons.Util.CLVar(), + KOTLINRTJARFLAGS=SCons.Util.CLVar(["-include-runtime"]), + KOTLINCCOM="$KOTLINC $KOTLINCFLAGS $SOURCE", + KOTLINCCOMSTR="", + KOTLINJARCOM="$KOTLINC $KOTLINJARFLAGS -d $TARGET $SOURCE", + KOTLINJARCOMSTR="", + KOTLINRTJARCOM="$KOTLINC $KOTLINRTJARFLAGS -d $TARGET $SOURCE", + KOTLINRTJARCOMSTR="", + ) + + env.AddMethod(Kotlin, "Kotlin") + env.AddMethod(KotlinJar, "KotlinJar") + env.AddMethod(KotlinRuntimeJar, "KotlinRuntimeJar") + + +def exists(env): + return _detect(env) diff --git a/builders/mcs.py b/builders/mcs.py new file mode 100644 index 000000000..07f1bd76c --- /dev/null +++ b/builders/mcs.py @@ -0,0 +1,37 @@ +from SCons.Builder import Builder +import SCons.Util + +class ToolMCSWarning(SCons.Warnings.SConsWarning): + pass + +class MCSNotFound(ToolMCSWarning): + pass + +SCons.Warnings.enableWarningClass(ToolMCSWarning) + +def _detect(env): + try: + return env['mcs'] + except KeyError: + pass + + mcs = env.WhereIs('mcs') + if mcs: + return mcs + + SCons.Warnings.warn(MCSNotFound, 'Could not find mcs executable') + +def exists(env): + env.Detect('mcs') + +def generate(env): + env['MCS'] = _detect(env) + env['MCSFLAGS'] = [] + + mcs_builder = Builder( + action='"$MCS" -out:$TARGET $MCSFLAGS $SOURCES', + src_suffix='.cs', + 
suffix='$PROGSUFFIX', + ) + + env.Append(BUILDERS={'MCS': mcs_builder}) diff --git a/builders/racket.py b/builders/racket.py new file mode 100644 index 000000000..2729658fe --- /dev/null +++ b/builders/racket.py @@ -0,0 +1,37 @@ +from SCons.Builder import Builder +import SCons.Util + +class ToolRacketWarning(SCons.Warnings.SConsWarning): + pass + +class RacketNotFound(ToolRacketWarning): + pass + +SCons.Warnings.enableWarningClass(ToolRacketWarning) + +def _detect(env): + try: + return env['raco'] + except KeyError: + pass + + go = env.WhereIs('raco') + if go: + return go + + SCons.Warnings.warn(RacketNotFound, 'Could not find raco executable') + +def exists(env): + env.Detect('raco') + +def generate(env): + env['RACO'] = _detect(env) + env['RACOFLAGS'] = [] + + racket_builder = Builder( + action='"$RACO" exe -o $TARGET $RACOFLAGS $SOURCE', + src_suffix='.rkt', + suffix='$PROGSUFFIX', + ) + + env.Append(BUILDERS={'Racket': racket_builder}) diff --git a/builders/rustc.py b/builders/rustc.py new file mode 100644 index 000000000..f07cf4fad --- /dev/null +++ b/builders/rustc.py @@ -0,0 +1,45 @@ +from SCons.Builder import Builder +import SCons.Util + +class ToolRustcWarning(SCons.Warnings.SConsWarning): + pass + +class RustcNotFound(ToolRustcWarning): + pass + +SCons.Warnings.enableWarningClass(ToolRustcWarning) + +def _detect(env): + try: + return env['rustc'] + except KeyError: + pass + + cargo = env.WhereIs('rustc') + if cargo: + return cargo + + SCons.Warnings.warn(RustcNotFound, 'Could not detect rustc') + + +def exists(env): + return env.Detect('rustc') + +def rustc_emitter(target, source, env): + src_name = str(source[0]) + pdb_name = src_name.replace(source[0].suffix, '.pdb') + env.SideEffect(pdb_name, target) + env.Clean(target, pdb_name) + return (target, source) + +def generate(env): + env['RUSTC'] = _detect(env) + env['RUSTCFLAGS'] = [] + + rust_cargo_builder = Builder( + action='"$RUSTC" $RUSTCFLAGS -o $TARGET $SOURCE', + suffix='$PROGSUFFIX', + 
src_suffix='.rs', + emitter=rustc_emitter, + ) + env.Append(BUILDERS={'rustc': rust_cargo_builder}) diff --git a/contents/IFS/IFS.md b/contents/IFS/IFS.md new file mode 100644 index 000000000..1e1e45d37 --- /dev/null +++ b/contents/IFS/IFS.md @@ -0,0 +1,281 @@ +# Iterated Function Systems + +A few quick notes before we start: + +1. For this chapter, we will be following the methodology set by the [plotting chapter](../plotting/plotting.md). +That is to say that the code presented in this chapter will output another file that can be easily plotted by an external plotter. +If you like to use a plotter provided by your language of choice, please modify the code provided to do so. + +2. This chapter is currently a subsection to the plotting chapter, but we may extend the algorithm archive in the future with other fractal generation methods, which would require creating a new section on fractals, in particular. +This would include a chapter with more rigorous definitions on fractals, which is largely missing from the following discussion. +Please let us know if you are interested! + +In this chapter, we will show you how to make one of the most famous fractals, the Sierpinski triangle, via Iterated Function Systems (IFSs). +We will also introduce a number of interesting concepts for further exploration, such as chaos games, Hutchinson operators, and attractors. + +## The Sierpinski Triangle + +To begin the discussion of Iterated Function Systems (IFSs), we will first discuss what might be one of the most famous fractals currently known: the Sierpinski triangle (shown below): + +Sierpinsky Triangle Chaos Game + +This image is clearly a set of triangles embedded in a larger triangle in such a way that it can be continually cut into three identical pieces and still retain its internal structure. +This idea is known as self-similarity {{ "self-similar" | cite }}, and it is usually the first aspect of fractals to catch an audience's attention. 
+In fact, there are plenty of uses of fractals and their mathematical underpinnings, such as estimating the coastline of Britain {{ "mandelbrot1967long" | cite }}, identifying fingerprints {{ "jampour2010new" | cite }}, and image compression {{ "fractal-compression" | cite }}{{ "saupe1994review" | cite }}. +In many more rigorous definitions, a fractal can be described as any system that has a non-integer Hausdorff dimension {{ "3b1bfractal" | cite }}{{ "hausdorff" | cite }}{{ "gneiting2012estimators" | cite }}. +Though this is an incredibly interesting concept, the discussion of this chapter will instead focus on methods to generate fractal patterns through iterated function systems. + +To start, imagine creating a triangle from three points, $$A$$, $$B$$, and $$C$$. +These points can be arbitrarily chosen, but for this conversation, we will constrict them to the vertices of an equilateral triangle, as shown below: + +Triangle Vertices + +Now let's create three separate functions that can act on a 2-dimensional space: + +$$ +\begin{align} +f_1(P) &= \frac{P + A}{2}\\ +f_2(P) &= \frac{P + B}{2}\\ +f_3(P) &= \frac{P + C}{2}\\ +\end{align} +$$ + +Each function will read in a particular location in space (here, $$P \in \mathbb{R}^2$$) and output a new location that is the midpoint between the input location and $$A$$, $$B$$, or $$C$$ for $$f_1$$, $$f_2$$, and $$f_3$$ respectively. +The union of all of these functions (the set of all possible functions available for use) is often notated as the _Hutchinson operator_ {{ "hutchinson-operator" | cite }}{{ "hutchinson1981fractals" | cite }}, and for this case it would look like this: + +$$ +H(P) = \bigcup_{i=1}^3f_i(P) +$$ + +By iteratively using this operator, we can traverse through all possible movements in the set. +For example, let's generate 3 new points that are halfway between $$A$$ and $$B$$, $$B$$ and $$C$$, and $$A$$ and $$C$$, which will be called $$D$$, $$E$$, and $$F$$ respectively. 
+This is shown below: + +Triangle Midpoints + +From here, each new point ($$D$$, $$E$$, and $$F$$) will spawn 3 children, and each child will move according to one of the three possible functions in the Hutchinson operator, as shown below: + +
+ +
+ +Here, all red children come from $$D$$, green children come from $$E$$ and blue children come from $$F$$. +At this stage, the children will then spawn 3 more children, each of which will move according to a different function. +Those children will then spawn more children, who act accordingly. +As this process continues on and on, we begin to see an interesting pattern form: + +
+ +
+ +This is the Sierpinski triangle. +At first, it might seem like mathematical magic that a simple set of 3 functions can create such a pattern. +After all, why aren't any of the children migrating to the empty spaces in the structure? +This will require some thought, but the simplest answer is that no function within the Hutchinson operator allows for children to enter those spaces; therefore, none of the children can enter them. + +## What about a square? + +When I learned about how the Sierpinski triangle could be generated from 3 simple functions, I began to wonder about other shapes. +Could we create fractal squares? Hexagons? Circles? +Such shapes _seem_ like natural extensions to the triangular Hutchinson operator provided above, but there's a bit of a hitch... + +First, let's take 4 points, $$A$$, $$B$$, $$C$$, and $$D$$, this time located at the four vertices of a square, like so: + +Sierpinsky Triangle Chaos Game + +In a similar fashion, we'll create 4 functions with $$H(P) = \bigcup_{i=1}^4f_i(P)$$, and $$P \in \mathbb{R}^2$$ such that: + +$$ +\begin{align} +f_1(P) &= \frac{P + A}{2}\\ +f_2(P) &= \frac{P + B}{2}\\ +f_3(P) &= \frac{P + C}{2}\\ +f_4(P) &= \frac{P + D}{2}\\ +\end{align} +$$ + +If we then create 5 initial points located between all the vertices and allow these points to continually spawn children like before, something peculiar happens: + +
+ +
+ +We essentially see a square of squares. +What happened to the self-similar structure we were getting before? +Why isn't this more interesting? + +The best answer I have for now is that some Hutchinson operators are interesting and some are not. +Still, this square is a bit more interesting than it first appears, but to see why, we need to use the Hutchinson operator in a slightly different way. + +## Chaos games and attractors + +Until now, our visualizations for both the Sierpinski triangle and the square have been computationally costly. +Every iteration, we generate 3 or 4 new children per child per step of the simulation. +This scales exponentially and means that we will quickly have millions of children to keep track of! +In fact, to deal with this, we developed our own method of counting through the tree to more efficiently keep track of everything, but that is a story for another day. + +The question for now is whether there is a more computationally feasible way of iterating through our Hutchinson operator. + +As it turns out, there is! +Rather than keeping track of every possible movement within the Hutchinson operator to draw out a shape, it's actually possible to randomly sample the function set instead through a process known as a _chaos game_ {{ "chaos-game" | cite }}{{ "chaos-game-wolf" | cite }}.. 
+Here, instead of tracking children of children, we track a single individual that chooses randomly between the Hutchinson functions, as shown here: + +{% method %} +{% sample lang="jl" %} +[import:4-17, lang:"julia"](code/julia/IFS.jl) +{% sample lang="hs" %} +[import:7-13, lang:"haskell"](code/haskell/IFS.hs) +{% sample lang="cpp" %} +[import:39-52, lang:"cpp"](code/cpp/IFS.cpp) +{% sample lang="py" %} +[import:5-12, lang:"python"](code/python/IFS.py) +{% sample lang="c" %} +[import:18-29, lang:"c"](code/c/IFS.c) +{% sample lang="lisp" %} +[import:5-14, lang:"lisp"](code/clisp/ifs.lisp) +{% sample lang="coco" %} +[import:4-16, lang:"coconut"](code/coconut/IFS.coco) +{% sample lang="rust" %} +[import:9-20, lang:"rust"](code/rust/IFS.rs) +{% sample lang="java" %} +[import:16-39, lang:"java"](code/java/IFS.java) +{% sample lang="ps1" %} +[import:2-19, lang:"powershell"](code/powershell/IFS.ps1) +{% endmethod %} + +If we set the initial point to the on the equilateral triangle we saw before, we can see the Sierpinski triangle again after a few thousand iterations, as shown below: + +
+ +
+ +Here, we are plotting 200,000 point locations in sets of 1000, and every set becomes successively more blue as the visualization continues. +At first glance, this visualization seems bewildering. +After all, it appears as if the entire triangle just magically comes into view in a few seconds. +The important thing to remember here is that each of these 200,000 dots is another location that our initial point decided to visit. + +That said, there is something peculiar about the way the chaos game starts. +We are actually allowed to start the simulation *off* of the Sierpinski triangle. +As we mentioned earlier, none of the functions for the Sierpinski visualization allow children to enter the empty spaces of the triangle, so let's see what happens if we start the point off at the center of the triangle: + +
+ +
+ +Here, I have plotted the first 20 steps of the chaos game, and it is clear that the point gets closer and closer to the triangle each iteration. +Once it lands on the triangle, it can no longer escape and every movement from then on will be on the triangle. + +In a sense, the wanderin point is _attracted_ to the Sierpinski triangle with this set of functions, and that is actually the case! +The truth is that the word _attractor_ is a very loaded term in the literature, but for the purposes of our discussion here, an _attractor_ is any shape defined by the iteration through Hutchinson operator functions. + +So let's go back to the example with the 4 points along the square and generate the attractor via a chaos game instead of going through every branch of the Hutchinson operator. +If we do this, we get what seems to be a random distribution of points: + +Hutchinson square + +This kinda boggled my mind a bit when I looked at it for the first time. +What does a random distribution of points mean in this context? + +Well, firstly, it's only a random distribution between the square vertices of $$A$$, $$B$$, $$C$$, and $$D$$, but nothing exists outside of these points. +This means that it's not actually a random distribution of points, but instead an attractive plane that our lone wandering point can exist happily within. + +This really helped me understand how attractors present themselves in different dimensions. +The Sierpinski triangle seems like a series of lines (one-dimensional objects) in two-dimensional space, but the square is a truly two-dimensional object. +In general, this means that an attractor embedded within $$\mathbb{R}^N$$ can be any shape of dimension N or lower. + +The next obvious question is whether a square can create any more interesting fractally patterns, and the answer is "yes, but only if we restrict the movement a bit." +Which brings us to another topic entirely: restricted chaos games. 
+Discussing restricted chaos games in more detail is a chapter in its own right, so I will forego the discussion here. +If you are interested, please let me know and I will be more than willing to add the chapter in the future! + +## Video Explanation + +Here is a video describing iterated function systems: + +
+ +
+ +## Example Code + +For the code in this chapter, we have decided to write it specifically for the Chaos game, not the hutchinson animations shown at the start of the chapter. +This is because that animation is slightly tricky to create and distracts from the overall purpose of this chapter. +In addition, we have written the chaos game code to take in a set of points so that it is not hard-coded for the Sierpinski triangle and can be easily extended to other shapes like the square or restricted chaos games, as we mentioned before! + +{% method %} +{% sample lang="jl" %} +[import, lang:"julia"](code/julia/IFS.jl) +{% sample lang="hs" %} +[import, lang:"haskell"](code/haskell/IFS.hs) +{% sample lang="cpp" %} +[import, lang:"cpp"](code/cpp/IFS.cpp) +{% sample lang="py" %} +[import, lang:"python"](code/python/IFS.py) +{% sample lang="c" %} +[import, lang:"c"](code/c/IFS.c) +{% sample lang="lisp" %} +[import, lang:"lisp"](code/clisp/ifs.lisp) +{%sample lang="coco" %} +[import, lang:"coconut"](code/coconut/IFS.coco) +{%sample lang="rust" %} +[import, lang:"rust"](code/rust/IFS.rs) +{%sample lang="java" %} +[import, lang:"java"](code/java/IFS.java) +{% sample lang="ps1" %} +[import, lang:"powershell"](code/powershell/IFS.ps1) +{% endmethod %} + +### Bibliography + +{% references %} {% endreferences %} + + + +## License + +##### Code Examples + +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). + +##### Text + +The text of this chapter was written by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +[

](https://creativecommons.org/licenses/by-sa/4.0/) + +#### Images/Graphics + +- The image "[IFS triangle 1](res/IFS_triangle_1.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[IFS triangle 2](res/IFS_triangle_2.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[IFS triangle 3](res/IFS_triangle_3.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[IFS triangle 4](res/IFS_triangle_4.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[IFS triangle 5](res/IFS_triangle_5.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[IFS square 1](res/IFS_square_1.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[IFS square 2](res/IFS_square_2.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). 
+- The image "[IFS square 3](res/IFS_square_3.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Chaos 1](res/chaos_1.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Chaos 2](res/chaos_2.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[IFS triangle video 1](res/IFS_triangle_vid_1.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[IFS triangle video 2](res/IFS_triangle_vid_2.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[IFS square video 1](res/IFS_square_vid_1.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Chaos video 1](res/chaos_vid_1.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). 
+- The video "[Chaos video 2](res/chaos_vid_2.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). diff --git a/contents/IFS/code/c/IFS.c b/contents/IFS/code/c/IFS.c new file mode 100644 index 000000000..ffd56f6fe --- /dev/null +++ b/contents/IFS/code/c/IFS.c @@ -0,0 +1,50 @@ +#include +#include +#include +#include + +struct point { + double x, y; +}; + +double drand() { + return ((double) rand() / (RAND_MAX)); +} + +struct point random_element(struct point *array, size_t n) { + return array[rand() % (int)n]; +} + +void chaos_game(struct point *in, size_t in_n, struct point *out, + size_t out_n) { + + struct point cur_point = {drand(), drand()}; + + for (size_t i = 0; i < out_n; ++i) { + out[i] = cur_point; + struct point tmp = random_element(in, in_n); + cur_point.x = 0.5 * (cur_point.x + tmp.x); + cur_point.y = 0.5 * (cur_point.y + tmp.y); + } +} + +int main() { + const size_t point_count = 10000; + + struct point shape_points [3] = {{0.0,0.0}, {0.5,sqrt(0.75)}, {1.0,0.0}}; + struct point out_points[point_count]; + + srand((unsigned int)time(NULL)); + + chaos_game(shape_points, 3, out_points, point_count); + + FILE *fp = fopen("sierpinksi.dat", "w+"); + + for (size_t i = 0; i < point_count; ++i) { + fprintf(fp, "%f\t%f\n", out_points[i].x, out_points[i].y); + } + + fclose(fp); + + return 0; +} diff --git a/contents/IFS/code/clisp/ifs.lisp b/contents/IFS/code/clisp/ifs.lisp new file mode 100644 index 000000000..c9fc0e432 --- /dev/null +++ b/contents/IFS/code/clisp/ifs.lisp @@ -0,0 +1,28 @@ +;;;; Iterated Function System implementation + +(defstruct (point (:constructor make-point (x y))) x y) + +(defun chaos-game (iterations shape-points) + "Plays a chaos game with a certain shape for a determined amount of iterations" + (loop + repeat iterations + for rand-point = (svref shape-points (random (length 
shape-points))) + for point = (make-point (random 1.0) (random 1.0)) ; starting point + then (make-point + (* 0.5 (+ (point-x rand-point) (point-x point))) + (* 0.5 (+ (point-y rand-point) (point-y point)))) ; every subsequent point + collect point)) + +(defparameter *shape-points* + (map + 'vector + (lambda (e) (apply #'make-point e)) + ;; the backquote allows us to selectively evaluate (sqrt 0.75) with the comma + `((0 0) (0.5 ,(sqrt 0.75)) (1 0)))) + +;; output the data to the "out.dat" file +(with-open-file (out "sierpinski.dat" :direction :output :if-exists :supersede) + (flet ((format-point (p) + ;; this is not very clean, but it's the simplest way to insert a tab into a string. + (format nil "~f~c~f" (point-x p) #\tab (point-y p)))) + (format out "~{~a~%~}" (map 'list #'format-point (chaos-game 10000 *shape-points*))))) diff --git a/contents/IFS/code/coconut/IFS.coco b/contents/IFS/code/coconut/IFS.coco new file mode 100644 index 000000000..2fce567d6 --- /dev/null +++ b/contents/IFS/code/coconut/IFS.coco @@ -0,0 +1,30 @@ +from math import sqrt +from random import random, choice + +data point(x=0, y=0): + def __add__(self, other): + return point(self.x + other.x, self.y + other.y) + + def __rmul__(self, other): + return point(self.x * other, self.y * other) + +def chaos_game(n, shape_points): + p = point(random(), random()) + + for _ in range(n): + p = (1/2) * (p + choice(shape_points)) + yield p + + +# This will generate a Sierpinski triangle with a chaos game of n points for an +# initial triangle with three points on the vertices of an equilateral triangle: +# A = (0.0, 0.0) +# B = (0.5, sqrt(0.75)) +# C = (1.0, 0.0) +# It will output the file sierpinski.dat, which can be plotted after +shape_points = [point(0.0, 0.0), + point(0.5, sqrt(0.75)), + point(1.0, 0.0)] +with open("sierpinski.dat", "w") as f: + for p in chaos_game(10000, shape_points): + f.write("{0}\t{1}\n".format(p.x, p.y)) diff --git a/contents/IFS/code/cpp/IFS.cpp 
b/contents/IFS/code/cpp/IFS.cpp new file mode 100644 index 000000000..31d6ce946 --- /dev/null +++ b/contents/IFS/code/cpp/IFS.cpp @@ -0,0 +1,65 @@ +#include +#include +#include +#include + +// Simple X-Y point structure, along with some operators +struct Point { + double x, y; +}; + +Point operator+(Point lhs, Point rhs) { return {lhs.x + rhs.x, lhs.y + rhs.y}; } +Point operator*(double k, Point pt) { return {k * pt.x, k * pt.y}; } +Point operator*(Point pt, double k) { return k * pt; } + +using PointVector = std::vector; + +// Returns a pseudo-random number generator +std::default_random_engine& rng() { + // Initialize static pseudo-random engine with non-deterministic random seed + static std::default_random_engine randEngine(std::random_device{}()); + return randEngine; +} + +// Returns a random double in [0, 1) +double drand() { + return std::uniform_real_distribution(0.0, 1.0)(rng()); +} + +// Returns a random integer in [0, numElems-1] +std::size_t randrange(std::size_t numElems) { + return std::uniform_int_distribution(0, numElems - 1)(rng()); +} + +// Return a random point from the non-empty PointVector +Point choose(const PointVector& points) { + return points[randrange(points.size())]; +} + +// This is a function to simulate a "chaos game" +PointVector chaosGame(int numOutputPoints, const PointVector& inputPoints) { + // Choose first point randomly + Point curPoint = {drand(), drand()}; + + // For each output point, compute midpoint to random input point + PointVector outputPoints(numOutputPoints); + for (auto& outPoint : outputPoints) { + outPoint = curPoint; + curPoint = 0.5 * (curPoint + choose(inputPoints)); + } + + return outputPoints; +} + +int main() { + // This will generate a Sierpinski triangle with a chaos game of n points for + // an initial triangle with three points on the vertices of an equilateral + // triangle. 
+ PointVector inputPoints = {{0.0, 0.0}, {0.5, std::sqrt(0.75)}, {1.0, 0.0}}; + auto outputPoints = chaosGame(10000, inputPoints); + + // It will output the file sierpinski.dat, which can be plotted after + std::ofstream ofs("sierpinski.dat"); + for (auto pt : outputPoints) + ofs << pt.x << '\t' << pt.y << '\n'; +} diff --git a/contents/IFS/code/haskell/IFS.hs b/contents/IFS/code/haskell/IFS.hs new file mode 100644 index 000000000..41d8b4c9a --- /dev/null +++ b/contents/IFS/code/haskell/IFS.hs @@ -0,0 +1,30 @@ +import Data.Array ((!), Array, bounds, listArray) +import Data.List (intercalate) +import System.Random + +data Point = Point Double Double + +chaosGame :: RandomGen g => g -> Int -> Array Int (Point -> Point) -> [Point] +chaosGame g n hutchinson = take n points + where + (x, g') = random g + (y, g'') = random g' + choices = randomRs (bounds hutchinson) g'' + points = Point x y : zipWith (hutchinson !) choices points + +main :: IO () +main = do + g <- newStdGen + + let midPoint (Point a b) (Point x y) = Point ((a + x) / 2) ((b + y) / 2) + sierpinski = + listArray + (1, 3) + [ midPoint (Point 0 0), + midPoint (Point 0.5 (sqrt 0.75)), + midPoint (Point 1 0) + ] + points = chaosGame g 10000 sierpinski + showPoint (Point x y) = show x ++ "\t" ++ show y + + writeFile "sierpinski.dat" $ intercalate "\n" $ map showPoint points diff --git a/contents/IFS/code/java/IFS.java b/contents/IFS/code/java/IFS.java new file mode 100644 index 000000000..2e7afc2ca --- /dev/null +++ b/contents/IFS/code/java/IFS.java @@ -0,0 +1,61 @@ +import java.io.FileWriter; +import java.util.Random; + +public class IFS { + + private static class Point { + double x, y; + + public Point(double x, double y) { + this.x = x; + this.y = y; + } + } + + // This is a function to simulate a "chaos game" + public static Point[] chaosGame(int n, Point[] shapePoints) { + Random rng = new Random(); + + // Initialize output vector + Point[] outputPoints = new Point[n]; + + // Choose first point randomly + 
Point point = new Point(rng.nextDouble(), rng.nextDouble()); + + for (int i = 0; i < n; i++) { + outputPoints[i] = point; + + // Clone point to get a new reference + point = new Point(point.x, point.y); + + // Retrieve random shape point + Point temp = shapePoints[rng.nextInt(shapePoints.length)]; + // Calculate midpoint + point.x = 0.5 * (point.x + temp.x); + point.y = 0.5 * (point.y + temp.y); + } + + return outputPoints; + } + + public static void main(String[] args) throws Exception { + // This will generate a Sierpinski triangle with a chaos game of n points for an + // initial triangle with three points on the vertices of an equilateral triangle: + // A = (0.0, 0.0) + // B = (0.5, sqrt(0.75)) + // C = (1.0, 0.0) + // It will output the file sierpinski.dat, which can be plotted after + Point[] shapePoints = new Point[]{ + new Point(0.0, 0.0), + new Point(0.5, Math.sqrt(0.75)), + new Point(1.0, 0.0) + }; + Point[] outputPoints = chaosGame(10000, shapePoints); + + FileWriter fw = new FileWriter("sierpinski.dat"); + for (Point p : outputPoints) + fw.write(p.x + "\t" + p.y + "\n"); + fw.close(); + } + +} \ No newline at end of file diff --git a/contents/IFS/code/julia/IFS.jl b/contents/IFS/code/julia/IFS.jl new file mode 100644 index 000000000..338a8016f --- /dev/null +++ b/contents/IFS/code/julia/IFS.jl @@ -0,0 +1,29 @@ +using DelimitedFiles + +# This is a function to simulate a "chaos game" +function chaos_game(n::Int, shape_points) + + # Initializing the output array and the initial point + output_points = zeros(n,2) + point = [rand(), rand()] + + for i = 1:n + output_points[i,:] .= point + point = 0.5*(rand(shape_points) .+ point) + end + + return output_points + +end + +# This will generate a Sierpinski triangle with a chaos game of n points for an +# initial triangle with three points on the vertices of an equilateral triangle: +# A = (0.0, 0.0) +# B = (0.5, sqrt(0.75)) +# C = (1.0, 0.0) +# It will output the file sierpinski.dat, which can be plotted after 
+shape_points = [[0.0, 0.0], + [0.5, sqrt(0.75)], + [1.0, 0.0]] +output_points = chaos_game(10000, shape_points) +writedlm("sierpinski.dat", output_points) diff --git a/contents/IFS/code/powershell/IFS.ps1 b/contents/IFS/code/powershell/IFS.ps1 new file mode 100644 index 000000000..086c80072 --- /dev/null +++ b/contents/IFS/code/powershell/IFS.ps1 @@ -0,0 +1,34 @@ +# This function simulates a "chaos game" +function Simulate-ChaosGame($n, $shapePoints) { + $outputPoints = New-Object System.Collections.ArrayList + + # Initialize the starting point + $point = @($(Get-Random -Minimum 0.0 -Maximum 1.0), $(Get-Random -Minimum 0.0 -Maximum 1.0)) + + for ($i = 0; $i -lt $n; $i++) { + $outputPoints.add($point) | Out-Null + $temp = $shapePoints[$(Get-Random -Maximum $shapePoints.Count)] + + $point = @( + 0.5 * ($point[0] + $temp[0]) + 0.5 * ($point[1] + $temp[1]) + ) + } + + return $outputPoints +} + + +# This will generate a Sierpinski triangle with a chaos game of n points for an +# initial triangle with three points on the vertices of an equilateral triangle: +# A = (0.0, 0.0) +# B = (0.5, sqrt(0.75)) +# C = (1.0, 0.0) +# It will output the file sierpinski.dat, which can be plotted after +$shapePoints = @( + @(0.0, 0.0), + @(0.5, [math]::sqrt(0.75)), + @(1.0, 0.0) +) + +Simulate-ChaosGame -n 10000 -shapePoints $shapePoints | % { "$($_[0])`t$($_[1])" } | Out-File -Path "sierpinski.dat" diff --git a/contents/IFS/code/python/IFS.py b/contents/IFS/code/python/IFS.py new file mode 100644 index 000000000..a7808fb5c --- /dev/null +++ b/contents/IFS/code/python/IFS.py @@ -0,0 +1,25 @@ +from random import random, choice +from math import sqrt + +# This generator simulates a "chaos game" +def chaos_game(n, shape_points): + # Initialize the starting point + point = [random(), random()] + + for _ in range(n): + # Update the point position and yield the result + point = [(p + s) / 2 for p, s in zip(point, choice(shape_points))] + yield point + +# This will generate a Sierpinski 
triangle with a chaos game of n points for an +# initial triangle with three points on the vertices of an equilateral triangle: +# A = (0.0, 0.0) +# B = (0.5, sqrt(0.75)) +# C = (1.0, 0.0) +# It will output the file sierpinski.dat, which can be plotted after +shape_points = [[0.0, 0.0], + [0.5, sqrt(0.75)], + [1.0, 0.0]] +with open("sierpinski.dat", "w") as f: + for point in chaos_game(10000, shape_points): + f.write("{0}\t{1}\n".format(*point)) diff --git a/contents/IFS/code/rust/Cargo.toml b/contents/IFS/code/rust/Cargo.toml new file mode 100644 index 000000000..90fbca22d --- /dev/null +++ b/contents/IFS/code/rust/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "rust" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +rand = "0.8.4" + +[[bin]] +path = "./IFS.rs" +name = "main" \ No newline at end of file diff --git a/contents/IFS/code/rust/IFS.rs b/contents/IFS/code/rust/IFS.rs new file mode 100644 index 000000000..476565f0f --- /dev/null +++ b/contents/IFS/code/rust/IFS.rs @@ -0,0 +1,36 @@ +use rand::*; + +#[derive(Clone, Copy)] +struct Point { + x: f64, + y: f64, +} + +fn chaos_game(iters: usize, shapes: Vec<Point>) -> Vec<Point> { + let mut rng = rand::thread_rng(); + let mut p = Point{x: rng.gen(), y: rng.gen()}; + + (0..iters).into_iter().map(|_| { + let old_point = p; + let tmp = shapes[rng.gen_range(0..shapes.len())]; + p.x = 0.5 * (p.x + tmp.x); + p.y = 0.5 * (p.y + tmp.y); + old_point + }).collect() +} + +fn main() { + let shapes = vec![ + Point{x: 0., y: 0.}, + Point{x: 0.5, y: 0.75_f64.sqrt()}, + Point{x: 1., y: 0.}, + ]; + + let mut out = String::new(); + + for point in chaos_game(10_000, shapes) { + out += format!("{}\t{}\n", point.x, point.y).as_str(); + } + + std::fs::write("./sierpinski.dat", out).unwrap(); +} \ No newline at end of file diff --git a/contents/IFS/res/IFS_square_1.png b/contents/IFS/res/IFS_square_1.png new file mode 100644 index 
000000000..06a6156ee Binary files /dev/null and b/contents/IFS/res/IFS_square_1.png differ diff --git a/contents/IFS/res/IFS_square_2.png b/contents/IFS/res/IFS_square_2.png new file mode 100644 index 000000000..42b6e0ec0 Binary files /dev/null and b/contents/IFS/res/IFS_square_2.png differ diff --git a/contents/IFS/res/IFS_square_3.png b/contents/IFS/res/IFS_square_3.png new file mode 100644 index 000000000..18785e301 Binary files /dev/null and b/contents/IFS/res/IFS_square_3.png differ diff --git a/contents/IFS/res/IFS_square_vid_1.mp4 b/contents/IFS/res/IFS_square_vid_1.mp4 new file mode 100644 index 000000000..0ea98affc Binary files /dev/null and b/contents/IFS/res/IFS_square_vid_1.mp4 differ diff --git a/contents/IFS/res/IFS_triangle_1.png b/contents/IFS/res/IFS_triangle_1.png new file mode 100644 index 000000000..959593b2f Binary files /dev/null and b/contents/IFS/res/IFS_triangle_1.png differ diff --git a/contents/IFS/res/IFS_triangle_2.png b/contents/IFS/res/IFS_triangle_2.png new file mode 100644 index 000000000..e7335866b Binary files /dev/null and b/contents/IFS/res/IFS_triangle_2.png differ diff --git a/contents/IFS/res/IFS_triangle_3.png b/contents/IFS/res/IFS_triangle_3.png new file mode 100644 index 000000000..993f1097d Binary files /dev/null and b/contents/IFS/res/IFS_triangle_3.png differ diff --git a/contents/IFS/res/IFS_triangle_4.png b/contents/IFS/res/IFS_triangle_4.png new file mode 100644 index 000000000..35a403333 Binary files /dev/null and b/contents/IFS/res/IFS_triangle_4.png differ diff --git a/contents/IFS/res/IFS_triangle_5.png b/contents/IFS/res/IFS_triangle_5.png new file mode 100644 index 000000000..b3942cdb4 Binary files /dev/null and b/contents/IFS/res/IFS_triangle_5.png differ diff --git a/contents/IFS/res/IFS_triangle_vid_1.mp4 b/contents/IFS/res/IFS_triangle_vid_1.mp4 new file mode 100644 index 000000000..165df43f2 Binary files /dev/null and b/contents/IFS/res/IFS_triangle_vid_1.mp4 differ diff --git 
a/contents/IFS/res/IFS_triangle_vid_2.mp4 b/contents/IFS/res/IFS_triangle_vid_2.mp4 new file mode 100644 index 000000000..7a5477979 Binary files /dev/null and b/contents/IFS/res/IFS_triangle_vid_2.mp4 differ diff --git a/contents/IFS/res/chaos_1.png b/contents/IFS/res/chaos_1.png new file mode 100644 index 000000000..03ecb4b37 Binary files /dev/null and b/contents/IFS/res/chaos_1.png differ diff --git a/contents/IFS/res/chaos_2.png b/contents/IFS/res/chaos_2.png new file mode 100644 index 000000000..81bd01718 Binary files /dev/null and b/contents/IFS/res/chaos_2.png differ diff --git a/contents/IFS/res/chaos_vid_1.mp4 b/contents/IFS/res/chaos_vid_1.mp4 new file mode 100644 index 000000000..398199836 Binary files /dev/null and b/contents/IFS/res/chaos_vid_1.mp4 differ diff --git a/contents/IFS/res/chaos_vid_2.mp4 b/contents/IFS/res/chaos_vid_2.mp4 new file mode 100644 index 000000000..a2ecf1e4f Binary files /dev/null and b/contents/IFS/res/chaos_vid_2.mp4 differ diff --git a/contents/affine_transformations/affine_transformations.md b/contents/affine_transformations/affine_transformations.md new file mode 100644 index 000000000..d832f9fc0 --- /dev/null +++ b/contents/affine_transformations/affine_transformations.md @@ -0,0 +1,340 @@ +# Affine Transformations + +Affine transformations are a class of mathematical operations that encompass rotation, scaling, translation, shearing, and several similar transformations that are regularly used for various applications in mathematics and computer graphics. +To start, we will draw a distinct (yet thin) line between affine and linear transformations before discussing the augmented matrix formalism typically used in practice. + +## A quick intro to affine (and linear) transforms + +Let us start with a provided point, $$(x,y)$$, on a two-dimensional plane. +If we treat this point as a $$1 \times 2$$ vector, we can transform it into another $$1 \times 2$$ vector by multiplying it with a $$2 \times 2$$ transformation matrix. 
+Similarly, a three-dimensional point could be seen as a $$1\times 3$$ vector and would need a $$3 \times 3 $$ transformation matrix. +These types of operations are known as linear transformations and are often notated as, + +$$ +\mathbf{v} = \mathbf{A}\mathbf{v}_0. +$$ + +Here, $$\mathbf{A}$$ is an $$n\times n$$ transformation matrix, where $$n$$ is the length of the input and output vectors, $$\mathbf{v_0}$$ and $$\mathbf{v}$$, respectively. +Though these transformations are powerful, all of them are centered about the origin. +Affine transformations extend linear transformations beyond this limitation and allow us to also translate our initial vector locations such that + +$$ +\textbf{v} = \mathbf{A}\mathbf{v}_0 + \ell. +$$ + +Here, $$\ell$$ is an $$n\times 1$$ translation vector. +To understand the power of these transformations, it is important to see them in practice: + +| Description | Transform | +| ----------- | --------- | +| Scaling along $$x$$ |
| +| Scaling along $$y$$ |
| +| Shearing along $$x$$ |
| +| Shearing along $$y$$ |
| +| Translation along $$x$$ |
| +| Translation along $$y$$ |
| + +For all of these visualizations, we show a set of 4 points that are assigned to the vertices of a square. +Initially, $$\mathbf{A}$$ is set to be the identity matrix and $$\ell = [0,0]$$, such that there is no transformation or translation to the input vectors. +From there, each element of $$\mathbf{A}$$ and $$\ell$$ are modified individually and the resulting transformation can be seen on the left. +The amount by which each element has been modified is shown numerically in the matrix representation and also as small dials underneath. + +The hope is that these visualizations show that each element within $$\mathbf{A}$$ and $$\ell$$ are simply dials that can be manipulated to perform a specified transformation on the set of input vectors. +Of course, it is entirely possible to move more than one dial at a time, which is why it is worth diving into an example that everyone loves: rotation. + +### Rotation: a special side-note + +I will be honest, when I initially learned how to perform rotation with a linear transformation, I did not really understand how it worked. +For this reason, I think it is important to delve a bit deeper into this topic, hopefully providing an intuitive explanation for those who are new (and potentially those who already use the rotation matrix regularly, but do not fully understand it). + +If someone were to take the set of dials shown above and mix them to create a rotational effect, they might start by shearing in one direction along $$x$$ and then another direction along $$y$$ which will create a "pseudo-rotation" effect. +This is definitely a step in the right direction, but if the shearing components are modified while the other components remain 1, the points will also move further away from the origin. +For this reason, an additional scaling along $$x$$ and $$y$$ is necessary. +This is shown in the following animation: + +
+ +
+ +Here, we see that (at least for angles less than $$\pi/2$$), rotation is simply a matter of shearing in opposite directions and scaling accordingly. +Now the only question is, *"How do we know the amount we need to shear and scale?"* + +Well, the answer is not particularly surprising. +If we want to rotate our points, we probably are already imagining this rotation along a circle with some angle $$\theta$$. +We know that the identity matrix should correspond to a non-rotated object with $$\theta = 0$$. +For this reason, we know that two elements should start at 1 (note: $$\cos(0) = 1$$) and the other two should start at 0 (note: $$\sin(0) = 0$$). +We also know that the shearing should happen in opposite directions, so we might guess that the rotation matrix would be: + +$$ +\mathbf{A}_{\text{rot}} = \begin{bmatrix} +\cos(\theta) & -\sin(\theta) \\ +\sin(\theta) & \cos(\theta) \\ +\end{bmatrix} +$$ + +In this case, the amount we want to shear should start at 0 when $$\theta = 0$$ and then go to $$\pm 1$$ when $$\theta = \pm \pi/2$$. +Meanwhile, the scale factor should start at 1 when $$\theta = 0$$ and go to $$0$$ when $$\theta = \pi/2$$. + +This *seems* right, but it is worth dwelling on this a bit more. +If the scale factor is 0 at $$\pi/2$$, surely this means that all points on the square are also at 0, right? +After all, anything scaled by 0 should be 0! +Well, not exactly. +In this case, + +$$ +\mathbf{A} = \begin{bmatrix} +1 & 0 \\ +0 & 1 \\ +\end{bmatrix} +\rightarrow +\begin{bmatrix} +0 & -1 \\ +1 & 0 \\ +\end{bmatrix} +$$ + +This means that even though the scaling components are 0, the shear components are $$\pm 1$$. 
+This might still be a little confusing so let us multiply the vector $$[1,2]$$ with both of these matrices: + +$$ +\begin{align} +\begin{bmatrix} +1 & 0 \\ +0 & 1 \\ +\end{bmatrix} +\begin{bmatrix} +1 \\ +2 \\ +\end{bmatrix} +&= +\begin{bmatrix} +1 \\ +2 \\ +\end{bmatrix},\\ + + +\begin{bmatrix} +0 & -1 \\ +1 & 0 \\ +\end{bmatrix} +\begin{bmatrix} +1 \\ +2 \\ +\end{bmatrix} +&= +\begin{bmatrix} +-2 \\ +1 \\ +\end{bmatrix}. + +\end{align} +$$ + +Here, we see that when multiplying by the identity matrix, the vector remains the same, but when multiplying by the second matrix, the x and y components flip. +Essentially, all of the vector magnitude moved into the "shear" component, while none of it remains in the "scale" component. + +My point is that even though it is useful to think of two of our dials as scale factors along $$x$$ and $$y$$, it does not necessarily paint the whole picture and it is important to consider how these different components work together. + +Before continuing to show what the $$\mathbf{A}_{\text{rot}}$$ matrix does when applied to a square, it is worth considering two somewhat related matrices where the identity matrix is modified with only the $$\sin(\theta)$$ or $$\cos(\theta)$$ components. + +| Description | Transform | +| ----------- | --------- | +| Just sines |
| +| Just cosines |
| + +Here, we see two completely different behaviors: + +1. In the sine-only case, we see that as $$\theta$$ wraps around from $$0 \rightarrow 2\pi$$, the square seems to grow and rotate like expected, but at $$\pi/2$$, it somewhat abruptly decides to move in the other direction. +2. In the cosine-only case, we see the square flip around entirely at $$\pi/2$$. + +Before watching the next video, it is important to think for a little bit about how these two different interactions will work together in practice. +When you are ready, go ahead and click the play button: + +
+ +
+ +At least for me, it took some thinking to figure out why the two animations above create rotation when put together. +When thinking about it, it makes sense that at $$\pi/2$$, the sine component will start to encourage the square to slowly oscillate back towards the original position, but will be tugged in the opposite direction by the cosine component that has turned negative at the same time. +This "coincidence" is what creates a rotational effect. + +Overall, the rotation matrix is a fun and interesting application to linear transformations that really helped me understand how the entire class of operations can be used to create more complicated movements. + +### Guarantees of affine transformations + +At this stage, we have discussed what affine transforms are from a functional perspective; however, (as always) there is a lot more to discuss. +This particular chapter is meant to provide an intuitive feel for the transformations for those who might need to use them for whatever application they need, so I am hesitant to dive too deeply into more rigorous definitions; however, it is important to talk about certain properties of affine transforms that make them suitable for a wide variety of applications. +Namely, affine transformations preserve the following: + +1. **collinearity between points**. This means that any points that are on the same line before an affine transform must be on that same line after the transformation. The line can still change in slope or position. +2. **parallelism between lines**. Any lines parallel before the transform must also be parallel after. +3. **ratios of the lengths of parallel line segments**. This means if you have two different line segments, one of which is parameterized by $$p_1$$ and $$p_2$$, while the other is parameterized by $$p_3$$ and $$p_4$$, then $$\frac{\vec{p_1 p_2}}{\vec{p_3 p_4}}$$ must be the same before and after transformation. +4. **convexity of any transformed shape**. 
If a shape does not have any concave component (a point that points in towards its center), then it cannot have a concave component after the transformation. +5. **barycenters of the collection of points**. The barycenter is the collective center of mass of the system, like the balancing point for a plate. Essentially, there is an equal amount of "stuff" on either side of the barycenter. This location must remain at the same location relative to each point after transformation. + +Again, there is a lot more we could talk about, but I feel we will leave more rigorous discussions for later if we need them for subsequent algorithms. +Instead, I believe it is useful to move on to a relatively common implementation of affine transformations: the augmented matrix formalism. + +## Augmented matrix implementation + +As stated before, affine transformations are basically a mix of a transformation matrix and translation. +For two-dimensional input vectors, the augmented matrix formalism combines both of these into a large $$3 \times 3$$ transformation matrix. +If you are like me, this might be a bit confusing. +After all, if the two-dimensional vector is described by a $$1 \times 2$$ array, then how do you do a matrix multiplication with a $$3 \times 3$$ array? 
+ +To be honest, the answer *feels* like a bit of a hack: we simply append a 1 to the end of the input, output, and translation vectors, such that: + +$$ +\begin{bmatrix} +\mathbf{v} \\ +1 \\ +\end{bmatrix} += +\left[\begin{array}{@{}ccc|c@{}} + & \mathbf{A} & & \ell \\ +0 & \cdots & 0 & 1 \\ +\end{array}\right] +\begin{bmatrix} +\mathbf{v}_0 \\ +1 +\end{bmatrix} +$$ + +So, using + +$$ +\begin{align} +\mathbf{v}_0 &= \begin{bmatrix} +1 \\ +2 \\ +\end{bmatrix} \\ +\mathbf{A} &= \begin{bmatrix} +0 & -1 \\ +1 & 0 \\ +\end{bmatrix} \\ +\ell &= \begin{bmatrix} +0 \\ +0 \\ +\end{bmatrix} +\end{align}, +$$ + +we would perform the following computation: + +$$ +\begin{bmatrix} +\mathbf{v} \\ +1 \\ +\end{bmatrix} += +\left[\begin{array}{@{}cc|c@{}} +0 & -1 & 0 \\ +1 & 0 & 0 \\ +0 & 0 & 1 \\ +\end{array}\right] +\begin{bmatrix} +1 \\ +2 \\ +1 +\end{bmatrix} +$$ + +Doing this, we find that $$\mathbf{v} = [-2,1]$$, just as we found in the previous example. +Ok, now we need to talk about why this works. + +Appending the 1 to the end of the two-dimensional vectors essentially turn them into three-dimensional vectors, with the $$z$$ dimension simply set to be 1. +The easiest way to visualize this is by thinking of the top plane on a larger cube, so here are the same vector operations as before on that cube: + +| Description | Transform | +| ----------- | --------- | +| Scaling along $$x$$ |
| +| Scaling along $$y$$ |
| +| Shearing along $$x$$ |
| +| Shearing along $$y$$ |
| +| Translation along $$x$$ |
| +| Translation along $$y$$ |
| + +The shear and scaling operations seem about the same as before; however, the translation operations are now clearly a shear along the entire cube! +The only reason this acts as translation for two dimensions is because we only care about the slice through the cube at $$z=1$$. + +Now, the reason I always feel this implementation is a bit hacky is because there is a little magic that everyone keeps quiet about: the last row in the matrix. +With all of the operations shown above, it was simply set to $$[0,0,1]$$ and never touched again... +But that is terribly unsatisfying! + +What would happen if we actually moved those dials and modified the bottom row? +Well... + +| Description | Transform | +| ----------- | --------- | +| Shearing along $$z$$ and $$x$$ |
| +| Shearing along $$z$$ and $$y$$ |
| +| Scaling along $$z$$ |
| + +In this case, the first two components are shearing along $$z$$ and $$x$$ and $$z$$ and $$y$$, while the last component is a scale along $$z$$. +If someone was taking a picture from above, none of these transformations would be visible. +Because we are hyper-focused on the top-down view for affine transformations, none of these operations are technically affine; however, they are still linear, and it is still nice to show all possible linear transforms for the cube as well. + +Finally, let us go back to the rotation example: + +
+ +
+ +Here, we see that we can embed just about any affine transformation into three dimensional space and still see the same results as in the two dimensional case. +I think that is a nice note to end on: affine transformations are linear transformations in an $$n+1$$ dimensional space. + +## Video Explanation + +Here is a video describing affine transformations: + +
+ +
+ + + +## License + +##### Code Examples + +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). + +##### Text + +The text of this chapter was written by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +[

](https://creativecommons.org/licenses/by-sa/4.0/) + +##### Images/Graphics +- The video "[A11 square](res/a11_square_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[A22 square](res/a22_square_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[A12 square](res/a12_square_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[A21 square](res/a21_square_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[A13 square](res/a13_square_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[A23 square](res/a23_square_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Semi Rotate](res/semi_rotate_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). 
+- The video "[Sines](res/sines_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Cosines](res/cosines_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Rotate Square](res/rotation_square_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[A11 cube](res/a11_cube_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[A22 cube](res/a22_cube_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[A12 cube](res/a12_cube_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[A21 cube](res/a21_cube_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). 
+- The video "[A13 cube](res/a13_cube_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[A23 cube](res/a23_cube_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[A31 cube](res/a31_cube_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[A32 cube](res/a32_cube_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[A33 cube](res/a33_cube_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Rotation cube](res/rotation_cube_white.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). 
+ diff --git a/contents/affine_transformations/res/a11_cube_white.mp4 b/contents/affine_transformations/res/a11_cube_white.mp4 new file mode 100644 index 000000000..060e0bed1 Binary files /dev/null and b/contents/affine_transformations/res/a11_cube_white.mp4 differ diff --git a/contents/affine_transformations/res/a11_square_white.mp4 b/contents/affine_transformations/res/a11_square_white.mp4 new file mode 100644 index 000000000..0af302c7b Binary files /dev/null and b/contents/affine_transformations/res/a11_square_white.mp4 differ diff --git a/contents/affine_transformations/res/a12_cube_white.mp4 b/contents/affine_transformations/res/a12_cube_white.mp4 new file mode 100644 index 000000000..885cd01ab Binary files /dev/null and b/contents/affine_transformations/res/a12_cube_white.mp4 differ diff --git a/contents/affine_transformations/res/a12_square_white.mp4 b/contents/affine_transformations/res/a12_square_white.mp4 new file mode 100644 index 000000000..cd420c043 Binary files /dev/null and b/contents/affine_transformations/res/a12_square_white.mp4 differ diff --git a/contents/affine_transformations/res/a13_cube_white.mp4 b/contents/affine_transformations/res/a13_cube_white.mp4 new file mode 100644 index 000000000..bd9663822 Binary files /dev/null and b/contents/affine_transformations/res/a13_cube_white.mp4 differ diff --git a/contents/affine_transformations/res/a13_square_white.mp4 b/contents/affine_transformations/res/a13_square_white.mp4 new file mode 100644 index 000000000..6657543e9 Binary files /dev/null and b/contents/affine_transformations/res/a13_square_white.mp4 differ diff --git a/contents/affine_transformations/res/a21_cube_white.mp4 b/contents/affine_transformations/res/a21_cube_white.mp4 new file mode 100644 index 000000000..51a56bf73 Binary files /dev/null and b/contents/affine_transformations/res/a21_cube_white.mp4 differ diff --git a/contents/affine_transformations/res/a21_square_white.mp4 b/contents/affine_transformations/res/a21_square_white.mp4 
new file mode 100644 index 000000000..5b3f19ed3 Binary files /dev/null and b/contents/affine_transformations/res/a21_square_white.mp4 differ diff --git a/contents/affine_transformations/res/a22_cube_white.mp4 b/contents/affine_transformations/res/a22_cube_white.mp4 new file mode 100644 index 000000000..bcdd9b306 Binary files /dev/null and b/contents/affine_transformations/res/a22_cube_white.mp4 differ diff --git a/contents/affine_transformations/res/a22_square_white.mp4 b/contents/affine_transformations/res/a22_square_white.mp4 new file mode 100644 index 000000000..0948a17fc Binary files /dev/null and b/contents/affine_transformations/res/a22_square_white.mp4 differ diff --git a/contents/affine_transformations/res/a23_cube_white.mp4 b/contents/affine_transformations/res/a23_cube_white.mp4 new file mode 100644 index 000000000..dd61cb439 Binary files /dev/null and b/contents/affine_transformations/res/a23_cube_white.mp4 differ diff --git a/contents/affine_transformations/res/a23_square_white.mp4 b/contents/affine_transformations/res/a23_square_white.mp4 new file mode 100644 index 000000000..e989e7cbf Binary files /dev/null and b/contents/affine_transformations/res/a23_square_white.mp4 differ diff --git a/contents/affine_transformations/res/a31_cube_white.mp4 b/contents/affine_transformations/res/a31_cube_white.mp4 new file mode 100644 index 000000000..08751b30e Binary files /dev/null and b/contents/affine_transformations/res/a31_cube_white.mp4 differ diff --git a/contents/affine_transformations/res/a32_cube_white.mp4 b/contents/affine_transformations/res/a32_cube_white.mp4 new file mode 100644 index 000000000..86b86adfa Binary files /dev/null and b/contents/affine_transformations/res/a32_cube_white.mp4 differ diff --git a/contents/affine_transformations/res/a33_cube_white.mp4 b/contents/affine_transformations/res/a33_cube_white.mp4 new file mode 100644 index 000000000..7c014df54 Binary files /dev/null and b/contents/affine_transformations/res/a33_cube_white.mp4 
differ diff --git a/contents/affine_transformations/res/cosines_white.mp4 b/contents/affine_transformations/res/cosines_white.mp4 new file mode 100644 index 000000000..204defb36 Binary files /dev/null and b/contents/affine_transformations/res/cosines_white.mp4 differ diff --git a/contents/affine_transformations/res/rotation_cube_white.mp4 b/contents/affine_transformations/res/rotation_cube_white.mp4 new file mode 100644 index 000000000..355cceddf Binary files /dev/null and b/contents/affine_transformations/res/rotation_cube_white.mp4 differ diff --git a/contents/affine_transformations/res/rotation_square_white.mp4 b/contents/affine_transformations/res/rotation_square_white.mp4 new file mode 100644 index 000000000..fd782f60b Binary files /dev/null and b/contents/affine_transformations/res/rotation_square_white.mp4 differ diff --git a/contents/affine_transformations/res/semi_rotate_white.mp4 b/contents/affine_transformations/res/semi_rotate_white.mp4 new file mode 100644 index 000000000..616288a36 Binary files /dev/null and b/contents/affine_transformations/res/semi_rotate_white.mp4 differ diff --git a/contents/affine_transformations/res/sines_white.mp4 b/contents/affine_transformations/res/sines_white.mp4 new file mode 100644 index 000000000..535a50625 Binary files /dev/null and b/contents/affine_transformations/res/sines_white.mp4 differ diff --git a/contents/approximate_counting/approximate_counting.md b/contents/approximate_counting/approximate_counting.md new file mode 100644 index 000000000..da3c63c8c --- /dev/null +++ b/contents/approximate_counting/approximate_counting.md @@ -0,0 +1,408 @@ +# The Approximate Counting Algorithm + +This might seem like a straightforward question, but how high can you count on your fingers? +This depends on how many fingers you have, but in general the answer has to be 10, right? + +Well, not exactly, it can actually go much, much higher with a few simple abstractions. 
+ +The first strategy is to think of your fingers as binary registers, like so {{ "3b1b_finger_count" | cite }}: + +

+ +

+ +If your fingers are out, they count as a 1 for that register. +If they are not, they count as a 0. +This means that after you have decided on the appropriate finger configuration, you have created a bitstring that can be read from left to right, where each number represents a power of 2. +For this example, we would have a bitstring of 1110010101, which reads to 917: + +$$ +1 \cdot 2^9 + 1 \cdot 2^8 + 1 \cdot 2^7 + 0 \cdot 2^6 + 0 \cdot 2^5 + 1 \cdot 2^4 + 0 \cdot 2^3 + 1 \cdot 2^2 + 0 \cdot 2^1 + 1 \cdot 2^0 +$$ +$$ += +$$ + +$$ +512 + 256 + 128 + 16 + 4 + 1 = 917 +$$ + +Because you have 10 fingers and each one represents a power of 2, you can count up to a maximum of $$2^{10}-1$$ or 1023, which is about 100 times higher than simple finger counting! +For those who might be wondering why you can count to $$2^{10}-1$$ instead of $$2^{10}$$ exactly, remember that each finger represents a power of 2. +The right thumb counts as $$2^0 = 1$$ and the left thumb is $$2^9 = 512$$. +With all fingers out, we have counted to $$\sum_{n=0}^9 2^n = 1023 = 2^{10}-1$$. + +So what if we wanted to go beyond 1023? +Well, we could start counting with our fingers as trits where 0 is closed, 1 is half-up, and 2 is fully up. +There are actually a huge variety of different ways we could move our hands about to count in odd ways, but we are interested in a more concrete problem: how high can we count with only 10 bits? + +This is almost exactly the problem that Morris encountered in Bell Labs around 1977 {{"morris1978counting" | cite }}. +There, he was given an 8-bit register and asked to count much higher than $$2^8 - 1= 255$$. +His solution was to invent a new method known as the approximate counting algorithm. +With this method, he could count to about $$130,000$$ with a relatively low error (standard deviation, $$\sigma \approx 17,000$$). +Using 10 registers (fingers), he could count to about $$1.1\times 10^{16}$$ with similar parameters, which is undoubtedly impressive! 
+ +The approximate counting algorithm is an early predecessor to streaming algorithms where information must be roughly processed in real-time. +As we dive into those methods later, this chapter will certainly be updated. +For now, we will not be showing any proofs (though those might come later as well), but a rigorous mathematical description of this method can be found in a follow-up paper by Philippe Flajolet {{ "flajolet1985approximate" | cite }}. +In addition, there are several blogs and resources online that cover the method to varying degrees of accessibility {{"arpit_counting" | cite }} {{"greggunderson_counting" | cite }}. + +Here, we hope to provide a basic understanding of the method, along with code implementations for anyone who might want to try something similar in the future. + +## A Simple Example + +If we need to count more than 255 items with 8 bits, there is one somewhat simple strategy: count every other item. +This means that we will increment our counter with 2, 4, 6, 8... items, effectively doubling the number of items we can count to 511! +(Note: that "!" is out of excitement and is not a factorial.) +Similarly, if we need to count above 511, we can increment our counter every 3 or 4 items; however, the obvious drawback to this method is that if we only count every other item, there is no way to represent odd numbers. +Similarly, if we count every 3rd or 4th item, we would miss out on any numbers that are not multiples of our increment number. + +The most important thing to take away from this line of reasoning is that counting can be done somewhat approximately by splitting the process into two distinct actions: incrementing the counter and storing the count, itself. +For example, every time a sheep walks by, you could lift a finger. +In this case, the act of seeing a sheep is a trigger for incrementing your counter, which is stored on your hand. 
+As mentioned, you could also lift a finger every time 2 or 3 sheep go by to count higher on your hand. +In code, bits are obviously preferred to hands for long-term storage. + +Taking this example a bit further, imagine counting 1,000,000 sheep. +If we wanted to save all of them on 8 bits (maximum size of 255), we could increment our counter every $$\sim 4000$$ sheep. +By counting in this way, we would first need to count around 4000 sheep before incrementing the main counter by 1. +After all the sheep have gone by, we would have counted up to 250 on our counter, and also counted up to $$4000$$ on a separate counter 250 times. +This has a few important consequences: +1. If the final number of sheep is not a multiple of 4000, then we will have an error associated with the total count of up to 4000 (0.4%). +2. There is no way to determine the final number of sheep if it is not a multiple of 4000. +3. We now need some way to count up to 4000 before incrementing the main counter. +This means we need a second counter! + +In a sense, 4000 would be a type of "counting resolution" for our system. +Overall, a 0.4% error is not bad, but it is possible to ensure that the approximate count is more accurate (but potentially less precise) by using randomness to our advantage. + +That is to say, instead of incrementing our counter every 4000th sheep, we could instead give each item a $$1/4000 = 0.025\%$$ chance of incrementing our main counter. +This averages out to be roughly 1 count every 4000 sheep, but the expectation value of a large number of counting experiments should be the correct number. +This means that even though we need to count all the sheep multiple times to get the right expectation value, we no longer need to keep a separate counter for the counting resolution of 4000. + +Because multiple counting trials are necessary to ensure the correct result, each counting experiment will have some associated error (sometimes much higher than 0.4%). 
+To quantify this error, let's actually perform the experiment multiple times, as shown below: + +

+ +

+ +In this image, we have counted 1,000,000 sheep (items) 10,000 different times. +In each run, we have given each item a 0.025% chance to flip our primary counter and have given each increment in our primary counter a weight of 4000 items. +We have plotted 10 of the 10,000 runs (chosen at random), and each upward tick of the lines represents one of the items winning a game of chance and adding 1 to the primary counter and thus adding 4000 to the approximate count. +We have also shaded the maximum and minimum approximate count for each true count of the 10,000 trials in gray, thereby highlighting the range of possible outputs. +On top of the plot, we have shown the distribution of all 10,000 runs for the approximate count at 10,000, 500,000, and 1,000,000 items. + +There's a lot to unpack here, so let's start with the upward trending lines. +Here, it seems like the approximate counts are roughly following the line of $$y=x$$ (dotted black line), which would indicate simple counting (without any randomness or approximation). +This makes sense because in a perfect world, the approximate count would always be exactly equal to the true number of items being counted. +Unfortunately, none of the lines shown here exactly follow $$y=x$$. +In fact, it would be impossible for any of the approximations to do so because we are always increasing the approximation in steps of 4000 while the true count increments by 1 with each new item. +That said, the *average* of all these counts together is a really good approximation for the true number of items. + +This is where the 3 additional plots come in: + +

+ +

+ +Each of these is a histogram of the approximate count for all 10,000 runs at 10,000 (left), 500,000 (middle), and 1,000,000 (right) items. +All three (especially the approximation for 1,000,000) look Gaussian, and the peak of the Gaussian seems to be the correct count. +In fact, the expectation value for our approximate counting scheme will always be correct. +In practice, this means that we can approximate any count on a small number of bits by doing a large number of counting trials and averaging their results. + +There is still a little catch that becomes more evident as we look at the approximation for 10,000 items. +In this case, even though the expectation value for the Gaussian distribution looks correct, it's kinda hard to tell exactly because there are only 8 (or so) possible values for each individual experiment. +Essentially, we are trying to count to 10,000 in steps of 4,000. +Clearly the closest we can get on any individual run is either 8,000 or 12,000, as these are multiples of 4,000. +Simply put: we cannot resolve 10,000 items with this method! + +Does this mean that this counting method is less useful for a small number of items? +In a sense, yes. +Here is a table for the true count, approximate count, and percent error for 10,000, 500,000, and 1,000,000 for the case where we do 10,000 counting experiments: + +| True Count | Approximate Count | Percent Error | +| ---------- | ----------------- | ------------- | +| 10,000 | 9,958.0 | 0.42 | +| 500,000 | 499,813.2 | 0.037 | +| 1,000,000 | 999,466.0 | 0.053 | + +Here, it seems that the percent error is 10 times higher when we count 10,000 items; however, +with these numbers, I could imagine some people reading this are thinking that we are splitting hairs. +A 0.42% error is still really good, right? +Right. +It's definitely not bad, but this was with 10,000 counting experiments. 
+Here is a new table where we only did 10: + +| True Count | Approximate Count | Percent Error | +| ---------- | ----------------- | ------------- | +| 10,000 | 8,000.0 | 20.0 | +| 500,000 | 483,200.0 | 3.36 | +| 1,000,000 | 961,600.0 | 3.84 | + +This time, there is a 20% error when counting to 10,000. +That's unacceptably high! + +To solve this problem, we need to find some way for the value of each increment on the actual counter to be more meaningful for lower counts. +This is precisely the job for a logarithm, which is what we will be looking at in the next section. +For now, it's important to look at another anomaly: why are the percent errors for the 500,000 and 1,000,000 cases so close? + +I gotta be honest, I don't know the correct answer here, but I would guess that it has something to do with the fact that both 500,000 and 1,000,000 are multiples of 4000 so our counting scheme can resolve both of them with roughly equal precision. +On top of that, both values are significantly higher than 4,000 so the counting resolution does not have as significant of an impact on the measured count. +Simply put, 4000 is a big step size when counting to 10,000, but a smaller one when counting to 500,000 or 1,000,000. + +As an important note, each approximate count shown in the tables above was the expectation value for a Gaussian probability distribution of different counting experiments all providing a guess at what the count could be. +Because we are no longer counting with integer increments but instead with probability distributions, we now need to quantify our error with the tools of probability, namely standard deviations. + +In the next section, we will tackle both issues brought up here: +1. In order to better approximate different scales of counting, it makes sense to use a logarithmic scale. +2. 
Because we are counting by using the expectation value of a Gaussian probability distribution from a set of counting experiments, it makes sense to quantify error with the tools we learned from probability and statistics. + +So I guess we should hop to it! + +## Adding a logarithm + +At this stage, I feel it's important to use terminology that more closely matches Morris's original paper {{"morris1978counting" | cite}}, so we will begin to talk about events, which are a general abstraction to the previous item / sheep analogy. +We will also introduce three different values: + +* $$n$$: the number of events that have occurred. +* $$v$$: the number we have stored in our bitstring. +* $$n_v$$: the approximate number of events that have occurred. + +It's important to stop here and think about what's actually going on. +We have a certain number of events ($$n$$) that have occurred and have stored that number on a binary register as $$v$$. +Traditionally, the number stored on the binary register would be exactly equal to the number of events, but because we do not have enough space on the register, we end up settling for an approximation of the number of events, $$n_v$$. +This is precisely what we did in the previous example, where $$v = \frac{n}{4000}$$ and $$n_v = 4000*v$$. + +As mentioned, using a constant scaling value (4000) for our approximate counting scheme means that the approximation is not ideal for a smaller number of events. +For this reason, it might be more appropriate to create a new method of storing the number of events by using a logarithmic scale, such that + +$$ +v = \log_2(1+n), +$$ + +which would mean that the approximate count would be + +$$ +n_v = 2^v-1. +$$ + +In this case, we are adding 1 to the argument of the logarithm for $$v$$ because $$\log_2(1) = 0$$ and we start counting at 1; therefore, we need some way to represent the value of 0. 
+Also, for this we can use any base logarithm (like $$e$$), but because we are dealing with bits, it makes sense to use base 2. +We'll talk about different bases next. +To be clear, here is a table of several values that could be stored in a bitstring along with their corresponding approximate counts: + +| $$v(n)$$ | $$n_v$$ | +| ------------------ | ----------------------- | +| $$00000000 = 0$$ | 0 | +| $$00000001 = 1$$ | $$1$$ | +| $$00000010 = 2$$ | $$3$$ | +| $$00000100 = 4$$ | $$15$$ | +| $$00010000 = 16$$ | $$65535$$ | +| $$01000000 = 64$$ | $$1.85 \times 10^{19}$$ | +| $$10000000 = 128$$ | $$3.40 \times 10^{38}$$ | +| $$11111111 = 255$$ | $$5.79 \times 10^{76}$$ | + +This means that we can hold from $$0$$ to $$2^{255} - 1 \approx 5.79 \times 10^{76}$$ with 8 bits using this new method. + +So let's now think about what happens every time a new event occurs. +To do this, Morris calculated a new value: + +$$ +\Delta = \frac{1}{n_{v+1} - n_{v}} +$$ + +where $$n_{v+1}$$ is the approximate count for the next possible value $$v$$ stored in the register. +In this case, $$\Delta$$ will always be between 0 and 1, so we can consider it to be the probability of whether we should increment our stored count or not. +For example, if we have a stored value of 2 ($$v=2$$), then + +$$ +\Delta = \frac{1}{n_3 - n_2} = \frac{1}{(2^3-1)-(2^2-1)} \approx \frac{1}{7-3} \approx 0.25. +$$ + +This indicates that there will be a 25% chance to increment $$v$$ from 2 to 3. +In practice, this means that we need to create another random variable $$r$$ and set our counter such that + +$$ +\begin{align} +\text{if } & r < \Delta, \qquad v = v + 1 \\ +\text{if } & r > \Delta, \qquad v = v. +\end{align} +$$ + +Again, $$\Delta$$ is essentially the probability that we will increment our counter with each object, and as we count higher, the probability decreases exponentially. + +

+ +

+ +Note: the $$y$$-axis to this figure is in logscale, which is why it looks like a straight line. + +Before leaving this section, it's important to note that the highest anyone can count with this method in base 2 using an 8-bit register is $$5.79 \times 10^{76}$$. +That's great! +Way, way better than 255, but we can still go higher with a different base of logarithm. +For example, if we use $$e$$ as our base, we can get up to $$e^{255}-1 = 5.56 \times 10^{110}$$. +In addition, by choosing smaller bases, we can also find a more accurate approximate count for lower values. +In practice, we want to select a base that allows us to count to a value of the same order (or one order higher) than the number of events we are expected to have. + +In the next section, we will consider how to generalize this logarithmic method to take arbitrary bases into account. + +## A slightly more general logarithm + +Let's start by considering the differences between base $$2$$ and base $$e$$. +For base $$e$$, + +$$ +\begin{align} +n_v &= e^v - 1 \\ +v &= \log_e(1+n). +\end{align} +$$ + +If we were to update our count and wanted to keep the value in the counter as accurate as possible, then the new value in the register with every new event would be + +$$ +v = \log_e(1+e^v). +$$ + +This is generally not an integer value, but $$v$$ *must* be an integer value (unless we want to try and store floating-point values (which we definitely don't have space for)), so what do we do in this situation? + +Well, let's look at the very first event where we need to increment our count from 0 to 1. +With base $$e$$, there would only be a 58% chance of counting the first event ($$\Delta = \frac{1}{1.72-0} = 0.58$$), and if the event is counted, the value in the register would be $$\approx 1.71 \neq 1$$. +Again, the expectation value for a bunch of trials is correct, but we did not have this issue with base 2, because + +$$ +v = \frac{\log_e(n+1)}{\log_e(2)} = 1 +$$ + +when $$n=1$$. 
+As a reminder, the above formula is a way to convert any logarithm from a given base (in this case $$e$$) to another base (in this case 2). + + +Going one step further, we need to choose a specific base for a logarithm that will at least ensure that the first count is correct, and for this reason, Morris studied a specific solution: + +$$ +\begin{align} + v &= \frac{\log(1+n/a)}{\log(1+1/a)}. \\ + n_v &= a\left(\left(1+\frac{1}{a}\right)^v-1\right). +\end{align} +$$ + +Here, $$a$$ is an effective tuning parameter and sets the maximum count allowed by the bitstring and the expected error. +The expression $$1+1/a$$ acts as a base for the logarithm and exponents and ensures that the first count of $$n=1$$ will also set the value $$v=1$$. +As an example, if the bitstring can be a maximum of 255 (for 8 bits) and we arbitrarily set +$$a=30$$, then the highest possible count with this approach will be $$\approx 130,000$$, which was the number reported in Morris's paper. +If we perform a few counting experiments, we find that this formula more closely tracks smaller numbers than before (when we were not using the logarithm): + +

+ +

+ +Now, let's pause for a second and look back at the case where our counting resolution was a constant 4000: + +

+ +

+ +It would seem that for higher counts, the previous method (with a constant counting resolution) is actually better! +Remember that in the case of a constant counting resolution, the step size is really small for higher counts, so we get a higher resolution probability distribution for when we count 500,000 and 1,000,000 items. +With the logarithmic scale, this is not the case, as the counting resolution now changes with the count, itself. +This is also why all three probability distributions for the logarithmic scaling have a similar distance between each bar. +In fact, it is probably worthwhile to look at each case more specifically: + +| Constant Counting Resolution | Logarithmic Counting Resolution | +| ---------------------------- | ------------------------------- | +| | | +| | | +| | | + +In the case where we count only to 10,000, we see a moderate increase in the resolution of the probability distribution, but in the 500,000 and 1,000,000 cases, we do not. +It's also important to notice that the logarithmic plots are a bit skewed to the left and are only Gaussian on logarithmic scales along $$x$$. +On the one hand, the logarithmic plots are nice in that they have the same relative error for all scales, but on the other hand, the error is relatively high. + +How do we fix this? +Well, by modifying the base of the logarithm with the variable $$a$$: + +

+ +

+ +Here, we show the differences in $$n_v$$ for $$25 \leq a \leq 35$$ when $$v=255$$. +It is important to twiddle $$a$$ based on what the maximum count is expected for + each experiment. +As an important note, the expected error estimate (variance) for each count will + be + +$$ +\sigma(n,a)^2 = \frac{n(n-1)}{2a}. +$$ + +Finally, before ending the paper, Morris mentioned that it is possible to pre-compute all values $$\Delta_j = (a/(a+1))^j$$ for all $$j \in [1,N]$$ where $$N$$ is the largest value possible integer with that bitstring (as an example, 255 for 8 bits). +This was probably more useful in 1978 than it is now, but it's still nice to keep in mind if you find yourself working on a machine with compute constrictions. + +## Video Explanation + +Here is a video describing the Approximate Counting Algorithm: + +
+ +
+ +## Example Code + +For this example, we have returned to the question asked above: how high can someone count on their fingers using the approximate counting algorithm? +We know from the formula that with $$a=30$$ and 10 bits, we should be able to count to $$1.1\times 10^{16}$$, but what happens when we perform the actual experiment? + +As we do not have any objects to count, we will instead simulate the counting with a `while` loop that keeps going until our bitstring is 1023 ($$2^{10}-1$$). + +{% method %} +{% sample lang="jl" %} +[import, lang:"julia"](code/julia/approximate_counting.jl) +{% sample lang="c" %} +[import, lang:"c"](code/c/approximate_counting.c) +{% sample lang="cpp" %} +[import, lang:"cpp"](code/cpp/approximate_counting.cpp) +{% sample lang="python" %} +[import, lang:"python"](code/python/approximate_counting.py) +{% sample lang="rs" %} +[import, lang:"rust"](code/rust/approximate_counting.rs) +{% sample lang="java" %} +[import, lang:"java"](code/java/ApproximateCounting.java) +{% endmethod %} + +### Bibliography + +{% references %} {% endreferences %} + + + +## License + +##### Code Examples + +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). + +##### Text + +The text of this chapter was written by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +[

](https://creativecommons.org/licenses/by-sa/4.0/) + +#### Images/Graphics + +- The image "[Finger Counting](../approximate_counting/res/hands.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Approximate trials](../approximate_counting/res/approximations.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Histograms](../approximate_counting/res/histograms.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Delta v v](../approximate_counting/res/deltas.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Approximate trials Logarithm](../approximate_counting/res/approximationsexp.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Histograms 10,000](../approximate_counting/res/hist_1.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). 
+- The image "[Histograms exp 10,000](../approximate_counting/res/histexp_1.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Histograms 500,000](../approximate_counting/res/hist_2.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Histograms exp 500,000](../approximate_counting/res/histexp_2.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Histograms 1,000,000](../approximate_counting/res/hist_3.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Histograms exp 1,000,000](../approximate_counting/res/histexp_3.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[A from 25 to 35](../approximate_counting/res/a_change.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). 
diff --git a/contents/approximate_counting/code/c/SConscript b/contents/approximate_counting/code/c/SConscript new file mode 100644 index 000000000..984e0eb11 --- /dev/null +++ b/contents/approximate_counting/code/c/SConscript @@ -0,0 +1,6 @@ +Import('files_to_compile env') + +for file_info in files_to_compile: + build_target = f'#/build/{file_info.language}/{file_info.chapter}/{file_info.path.stem}' + build_result = env.C(build_target, str(file_info.path), LIBS='m') + env.Alias(str(file_info.chapter), build_result) diff --git a/contents/approximate_counting/code/c/approximate_counting.c b/contents/approximate_counting/code/c/approximate_counting.c new file mode 100644 index 000000000..f372b0953 --- /dev/null +++ b/contents/approximate_counting/code/c/approximate_counting.c @@ -0,0 +1,88 @@ +#include +#include +#include +#include +#include + +// This function returns a pseudo-random number between 0 and 1 +double drand() +{ + return (double)rand() / RAND_MAX; +} + +// This function takes +// - v: value in register +// - a: a scaling value for the logarithm based on Morris's paper +// It returns the approximate count +double n(double v, double a) +{ + return a * (pow(1 + 1 / a, v) - 1); +} + +// This function takes +// - v: value in register +// - a: a scaling value for the logarithm based on Morris's paper +// It returns a new value for v +double increment(double v, double a) +{ + // delta is the probability of incrementing our counter + double delta = 1 / (n(v + 1, a) - n(v, a)); + + if (drand() <= delta) { + return v + 1; + } + return v; +} + +// This function simulates counting and takes +// - n_items: number of items to count and loop over +// - a: a scaling value for the logarithm based on Morris's paper +// It returns n(v, a), the approximate count +double approximate_count(size_t n_items, double a) +{ + double v = 0; + for (size_t i = 0; i < n_items; ++i) { + v = increment(v, a); + } + + return n(v, a); +} + +// This function takes +// - n_trials: the number 
of counting trials +// - n_items: the number off items to count +// - a: a scaling value for the logarithm based on Morris's paper +// - threshold: the maximum percent error allowed +// It terminates the program on failure +void test_approximation_count(size_t n_trials, size_t n_items, double a, + double threshold) +{ + double sum = 0.0; + for (size_t i = 0; i < n_trials; ++i) { + sum += approximate_count(n_items, a); + } + double avg = sum / (double)n_trials; + + double items = (double)n_items; + if (fabs((avg - items) / items) < threshold){ + printf("passed\n"); + } + else{ + printf("failed\n"); + } +} + +int main() +{ + srand((unsigned int)time(NULL)); + + printf("[#]\nCounting Tests, 100 trials\n"); + printf("[#]\ntesting 1,000, a = 30, 10%% error\n"); + test_approximation_count(100, 1000, 30, 0.1); + printf("[#]\ntesting 12,345, a = 10, 10%% error\n"); + test_approximation_count(100, 12345, 10, 0.1); + printf("[#]\ntesting 222,222, a = 0.5, 20%% error\n"); + test_approximation_count(100, 222222, 0.5, 0.2); + + return 0; +} diff --git a/contents/approximate_counting/code/cpp/approximate_counting.cpp b/contents/approximate_counting/code/cpp/approximate_counting.cpp new file mode 100644 index 000000000..1ee2790b7 --- /dev/null +++ b/contents/approximate_counting/code/cpp/approximate_counting.cpp @@ -0,0 +1,71 @@ +#include +#include +#include +#include + +// Returns a pseudo-random number generator +std::default_random_engine& rng() { + // Initialize static pseudo-random engine with non-deterministic random seed + static std::default_random_engine randEngine(std::random_device{}()); + return randEngine; +} + +// Returns a random double in [0, 1) +double drand() { + return std::uniform_real_distribution(0.0, 1.0)(rng()); +} + +// This function takes +// - v: value in register +// - a: a scaling value for the logarithm based on Morris's paper +// It returns n(v,a), the approximate count +auto n(double v, double a) { return a * (pow((1 + 1 / a), v) - 1); } + +// This 
function takes +// - v: value in register +// - a: a scaling value for the logarithm based on Morris's paper +// It returns a new value for v +auto increment(int v, double a) { + // delta is the probability of incrementing our counter + const auto delta = 1 / (n(v + 1, a) - n(v, a)); + return (drand() <= delta) ? v + 1 : v; +} + +// This simulates counting and takes +// - n_items: number of items to count and loop over +// - a: a scaling value for the logarithm based on Morris's paper +// It returns n(v,a), the approximate count +auto approximate_count(int n_items, double a) { + auto v = 0; + for (auto i = 0; i < n_items; ++i) + v = increment(v, a); + + return n(v, a); +} + +// This function takes +// - n_trials: the number of counting trials +// - n_items: the number of items to count to +// - a: a scaling value for the logarithm based on Morris's paper +// - threshold: the maximum percent error allowed +// It returns a "pass" / "fail" test value +auto test_approximate_count( + int n_trials, int n_items, double a, double threshold) { + auto sum = 0.0; + for (auto i = 0; i < n_trials; ++i) + sum += approximate_count(n_items, a); + const auto avg = sum / n_trials; + return std::abs((avg - n_items) / n_items) < threshold ? "passed" : "failed"; +} + +int main() { + std::cout << "[#]\nCounting Tests, 100 trials\n"; + + std::cout << "[#]\ntesting 1,000, a = 30, 10% error \n" + << test_approximate_count(100, 1000, 30, 0.1) << "\n"; + std::cout << "[#]\ntesting 12,345, a = 10, 10% error \n" + << test_approximate_count(100, 12345, 10, 0.1) << "\n"; + // Note : with a lower a, we need more trials, so a higher % error here. 
+ std::cout << "[#]\ntesting 222,222, a = 0.5, 20% error \n" + << test_approximate_count(100, 222222, 0.5, 0.2) << "\n"; +} diff --git a/contents/approximate_counting/code/java/ApproximateCounting.java b/contents/approximate_counting/code/java/ApproximateCounting.java new file mode 100644 index 000000000..2dd49c0f0 --- /dev/null +++ b/contents/approximate_counting/code/java/ApproximateCounting.java @@ -0,0 +1,85 @@ +import java.lang.Math; +import java.util.stream.DoubleStream; + +public class ApproximateCounting { + + /* + * This function taks + * - v: value in register + * - a: a scaling value for the logarithm based on Morris's paper + * It returns the approximate count + */ + static double n(double v, double a) { + return a * (Math.pow(1 + 1 / a, v) - 1); + } + + + /* + * This function takes + * - v: value in register + * - a: a scaling value for the logarithm based on Morris's paper + * It returns the new value for v + */ + static double increment(double v, double a) { + double delta = 1 / (n(v + 1, a) - n(v, a)); + + if (Math.random() <= delta) { + return v + 1; + } else { + return v; + } + } + + + + /* + * This function takes + * - v: value in register + * - a: a scaling value for the logarithm based on Morris's paper + * It returns the new value for v + */ + static double approximateCount(int nItems, double a) { + double v = 0; + + for (int i = 0; i < nItems; i++) { + v = increment(v, a); + } + + return n(v, a); + } + + /* + * This function takes + * - nTrials: the number of counting trails + * - nItems: the number of items to count + * - a: a scaling value for the logarithm based on Morris's paper + * - threshold: the maximum percent error allowed + * It terminates the program on failure + */ + static void testApproximateCount(int nTrials, int nItems, double a, double threshold) { + double avg = DoubleStream.generate(() -> approximateCount(nItems, a)) + .limit(nTrials) + .average() + .getAsDouble(); + + if (Math.abs((avg - nItems) / nItems) < threshold) { + 
System.out.println("passed"); + } else { + System.out.println("failed"); + } + } + + + public static void main(String args[]) { + System.out.println("[#]\nCounting Tests, 100 trials"); + System.out.println("[#]\ntesting 1,000, a = 30, 10% error"); + testApproximateCount(100, 1_000, 30, 0.1); + + System.out.println("[#]\ntesting 12,345, a = 10, 10% error"); + testApproximateCount(100, 12_345, 10, 0.1); + + System.out.println("[#]\ntesting 222,222, a = 0.5, 20% error"); + testApproximateCount(100, 222_222, 0.5, 0.2); + } + +} diff --git a/contents/approximate_counting/code/julia/approximate_counting.jl b/contents/approximate_counting/code/julia/approximate_counting.jl new file mode 100644 index 000000000..24c9f0fb6 --- /dev/null +++ b/contents/approximate_counting/code/julia/approximate_counting.jl @@ -0,0 +1,67 @@ +using Test + +# This function takes +# - v: value in register +# - a: a scaling value for the logarithm based on Morris's paper +# It returns n(v,a), the approximate count +function n(v, a) + a*((1+1/a)^v-1) +end + +# This function takes +# - v: value in register +# - a: a scaling value for the logarithm based on Morris's paper +# It returns a new value for v +function increment(v, a) + # delta is the probability of incrementing our counter + delta = 1/(n(v+1, a)-n(v, a)) + + if rand() <= delta + return v + 1 + else + return v + end +end + +# This simulates counting and takes +# - n_items: number of items to count and loop over +# - a: a scaling value for the logarithm based on Morris's paper +# It returns n(v,a), the approximate count +function approximate_count(n_items, a) + v = 0 + for i = 1:n_items + v = increment(v, a) + end + + return n(v, a) +end + +# This function takes +# - n_trials: the number of counting trials +# - n_items: the number of items to count to +# - a: a scaling value for the logarithm based on Morris's paper +# - threshold: the maximum percent error allowed +# It returns a true / false test value +function 
test_approximate_count(n_trials, n_items, a, threshold) + samples = [approximate_count(n_items, a) for i = 1:n_trials] + + avg = sum(samples)/n_trials + + if (abs((avg - n_items) / n_items) < threshold) + println("passed") + else + println("failed") + end +end + +println("[#]\nCounting Tests, 100 trials") + +println("[#]\ntesting 1,000, a = 30, 10% error") +test_approximate_count(100, 1000, 30, 0.1) + +println("[#]\ntesting 12,345, a = 10, 10% error") +test_approximate_count(100, 12345, 10, 0.1) + +# Note: with a lower a, we need more trials, so a higher % error here. +println("[#]\ntesting 222,222, a = 0.5, 20% error") +test_approximate_count(100, 222222, 0.5, 0.2) diff --git a/contents/approximate_counting/code/python/approximate_counting.py b/contents/approximate_counting/code/python/approximate_counting.py new file mode 100644 index 000000000..a8381ffe8 --- /dev/null +++ b/contents/approximate_counting/code/python/approximate_counting.py @@ -0,0 +1,52 @@ +from random import random + +# This function takes +# - v: value in register +# - a: a scaling value for the logarithm based on Morris's paper +# It returns n(v,a), the approximate_count +def n(v, a): + return a*((1 + 1/a)**v - 1) + +# This function takes +# - v: value in register +# - a: a scaling value for the logarithm based on Morris's paper +# It returns a new value for v +def increment(v, a): + delta = 1/(n(v + 1, a) - n(v, a)) + if random() <= delta: + return v + 1 + else: + return v + +#This simulates counting and takes +# - n_items: number of items to count and loop over +# - a: a scaling value for the logarithm based on Morris's paper +# It returns n(v,a), the approximate count +def approximate_count(n_items, a): + v = 0 + for i in range(1, n_items + 1): + v = increment(v, a) + return n(v, a) + +# This function takes +# - n_trials: the number of counting trials +# - n_items: the number of items to count to +# - a: a scaling value for the logarithm based on Morris's paper +# - threshold: the maximum 
percent error allowed +# It returns a true / false test value +def test_approximate_count(n_trials, n_items, a, threshold): + samples = [approximate_count(n_items, a) for i in range(1, n_trials + 1)] + avg = sum(samples)/n_trials + + if abs((avg - n_items)/n_items) < threshold: + print("passed") + else: + print("failed") + +print("[#]\nCounting Tests, 100 trials") +print("[#]\ntesting 1,000, a = 30, 10% error") +test_approximate_count(100, 1000, 30, 0.1) +print("[#]\ntesting 12,345, a = 10, 10% error") +test_approximate_count(100, 12345, 10, 0.1) +print("[#]\ntesting 222,222, a = 0.5, 20% error") +test_approximate_count(100, 222222, 0.5, 0.2) diff --git a/contents/approximate_counting/code/rust/Cargo.toml b/contents/approximate_counting/code/rust/Cargo.toml new file mode 100644 index 000000000..457c5d96c --- /dev/null +++ b/contents/approximate_counting/code/rust/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "approximate_counting" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +rand = "0.8.4" + +[[bin]] +name = "main" +path = "approximate_counting.rs" diff --git a/contents/approximate_counting/code/rust/approximate_counting.rs b/contents/approximate_counting/code/rust/approximate_counting.rs new file mode 100644 index 000000000..278593645 --- /dev/null +++ b/contents/approximate_counting/code/rust/approximate_counting.rs @@ -0,0 +1,67 @@ +// This function takes +// - v: value in register +// - a: a scaling value for the logarithm based on Morris's paper +// It returns n(v,a), the approximate count +fn n(v: f64, a: f64) -> f64 { + a * ((1_f64 + 1_f64 / a).powf(v) - 1_f64) +} + + +// This function takes +// - v: value in register +// - a: a scaling value for the logarithm based on Morris's paper +// It returns a new value for v +fn increment(v: f64, a: f64) -> f64 { + // delta is the probability of incrementing our counter + let delta = 1_f64 / (n(v + 1_f64, 
a) - n(v, a)); + + if rand::random::() <= delta { + v + 1_f64 + } else { + v + } +} + +// This simulates counting and takes +// - n_items: number of items to count and loop over +// - a: a scaling value for the logarithm based on Morris's paper +// It returns n(v,a), the approximate count +fn approximate_count(n_items: usize, a: f64) -> f64 { + let mut v = 0_f64; + + for _ in 0..n_items { + v = increment(v, a); + } + + v +} + +// This function takes +// - n_trials: the number of counting trials +// - n_items: the number of items to count to +// - a: a scaling value for the logarithm based on Morris's paper +// - threshold: the maximum percent error allowed +// It returns a "passed" / "failed" test value +fn test_approximate_count(n_trials: usize, n_items: usize, a: f64, threshold: f64) { + let avg = std::iter::from_fn(|| Some(approximate_count(n_items, a))) + .take(n_trials) + .sum::() / n_trials as f64; + + let n_items_float = n_items as f64; + + if ((avg - n_items_float) / n_items_float) < threshold { + println!("passed"); + } else { + println!("failed"); + } + +} + +fn main() { + println!("testing 1,000, a = 30, 10% error"); + test_approximate_count(100, 1000, 30_f64, 0.1); + println!("testing 12,345, a = 10, 10% error"); + test_approximate_count(100, 12345, 10_f64, 0.1); + println!("testing 222,222, a = 0.5, 20% error"); + test_approximate_count(100, 222222, 0.5, 0.2); +} diff --git a/contents/approximate_counting/res/a_change.png b/contents/approximate_counting/res/a_change.png new file mode 100644 index 000000000..6d5348202 Binary files /dev/null and b/contents/approximate_counting/res/a_change.png differ diff --git a/contents/approximate_counting/res/approximations.png b/contents/approximate_counting/res/approximations.png new file mode 100644 index 000000000..eba9000dc Binary files /dev/null and b/contents/approximate_counting/res/approximations.png differ diff --git a/contents/approximate_counting/res/approximationsexp.png 
b/contents/approximate_counting/res/approximationsexp.png new file mode 100644 index 000000000..097b665e5 Binary files /dev/null and b/contents/approximate_counting/res/approximationsexp.png differ diff --git a/contents/approximate_counting/res/counting.jl b/contents/approximate_counting/res/counting.jl new file mode 100644 index 000000000..e53a77aea --- /dev/null +++ b/contents/approximate_counting/res/counting.jl @@ -0,0 +1,123 @@ +using DelimitedFiles + +# n = number of events +# prob = probability of incrementing counter +function random_count(n, prob; a=30, prob_calc=false) + v = 0 + base = 1+1/a + output = zeros(n) + + if prob_calc + prob = 1 + v = 1 + end + + if rand() < prob + output[1] = 1/prob + end + + for i = 2:n + if prob_calc + prob = 1/((a*(base^(v+1)-1))-output[i-1]) + + if rand() <= prob + v += 1 + end + output[i] = a*(base^v-1) + else + if rand() <= prob + output[i] = output[i-1]+1/prob + else + output[i] = output[i-1] + end + end + end + + return output +end + +# m = number of counting trials +# l = number of saved trials +function multi_count(n, m, l, prob; a=30, prob_calc=false, file_mod="", + stops = [100000, 500000, 1000000]) + out = zeros(n, l) + extremes = zeros(n, 2) + + chosen_set = zeros(Int, l) + + for i = 1:l + chosen_number = rand(1:m) + while chosen_number in chosen_set + chosen_number = rand(1:m) + end + chosen_set[i] = chosen_number + end + + histograms = zeros(Float64, m, length(stops)) + + out_count = 1 + for i = 1:m + current_dist = random_count(n, prob; a, prob_calc) + if i == 1 + extremes[:,1] .= current_dist + extremes[:,2] .= current_dist + else + for j = 1:n + if current_dist[j] < extremes[j,1] + extremes[j,1] = current_dist[j] + end + if current_dist[j] > extremes[j,2] + extremes[j,2] = current_dist[j] + end + + end + end + + for j = 1:length(stops) + histograms[i,j] = current_dist[stops[j]] + end + + if i in chosen_set + out[:,out_count] = current_dist + out_count += 1 + end + end + + output_file = 
open("out"*file_mod*".dat", "w") + writedlm(output_file, out) + close(output_file) + + extreme_output_file = open("extremes"*file_mod*".dat", "w") + writedlm(extreme_output_file, extremes) + close(extreme_output_file) + + formatted_histograms = [[],[],[]] + # Going through the histogram data to put it into the right format + for j = 1:length(stops) + + max = floor(Int,maximum(histograms[:,j])) + min = floor(Int,minimum(histograms[:,j])) + + println(min, '\t', max, '\t', sum(histograms[:,j])/m) + + temp_array = zeros(max - min+1) + + for i = 1:m + + temp_array[floor(Int,histograms[i,j])-min+1] += 1 + + end + + formatted_histograms[j] = temp_array + end + + # output histograms into different files for each one + + for i = 1:length(stops) + histogram_file = open("histogram_" * string(i)*file_mod* ".dat", "w") + writedlm(histogram_file, formatted_histograms[i]) + close(histogram_file) + end + return formatted_histograms +end + diff --git a/contents/approximate_counting/res/deltas.png b/contents/approximate_counting/res/deltas.png new file mode 100644 index 000000000..003ac3622 Binary files /dev/null and b/contents/approximate_counting/res/deltas.png differ diff --git a/contents/approximate_counting/res/hands.png b/contents/approximate_counting/res/hands.png new file mode 100644 index 000000000..d64dd4508 Binary files /dev/null and b/contents/approximate_counting/res/hands.png differ diff --git a/contents/approximate_counting/res/hist_1.png b/contents/approximate_counting/res/hist_1.png new file mode 100644 index 000000000..6c39b9a1c Binary files /dev/null and b/contents/approximate_counting/res/hist_1.png differ diff --git a/contents/approximate_counting/res/hist_2.png b/contents/approximate_counting/res/hist_2.png new file mode 100644 index 000000000..a8ec08f71 Binary files /dev/null and b/contents/approximate_counting/res/hist_2.png differ diff --git a/contents/approximate_counting/res/hist_2_plot.gp b/contents/approximate_counting/res/hist_2_plot.gp new file mode 
100644 index 000000000..e5908c105 --- /dev/null +++ b/contents/approximate_counting/res/hist_2_plot.gp @@ -0,0 +1,31 @@ +set terminal epslatex standalone color + +#set output "histexp_1.tex" +#set output "histexp_2.tex" +set output "histexp_3.tex" + +set size square + +#set title "True count of 10,000" +#set title "True count of 500,000" +set title "True count of 1,000,000" + +set xlabel '' +#set xrange [-255:9745] +#set xtics ("6000" -255, "10,000" 3745, "16,000" 9745) + +#set xrange [352000:644000] +#set xrange[-1458:538542] +#set xtics ("320,000" -1458, "500,000" 178542, "860,000" 538542) + +#set xrange [808000:1240000] +set xrange[-19374:1100626] +set xtics ("600,0000" -19374, "1,000,000" 380626, "1,720,000" 1100626) + +#set ylabel 'Approximate count $\left( \times 10^{5} \right)$' +#set ytics ("0" 0, "2" 200000, "4" 400000, "6" 600000, "8" 800000, "10" 1000000) + +#plot "histogram_1exp.dat" w l lw 10 title "" +#plot "histogram_2exp.dat" w l lw 10 title "" +plot "histogram_3exp.dat" w l lw 10 title "" + diff --git a/contents/approximate_counting/res/hist_3.png b/contents/approximate_counting/res/hist_3.png new file mode 100644 index 000000000..5b65c68d5 Binary files /dev/null and b/contents/approximate_counting/res/hist_3.png differ diff --git a/contents/approximate_counting/res/hist_plot.gp b/contents/approximate_counting/res/hist_plot.gp new file mode 100644 index 000000000..1de5ec860 --- /dev/null +++ b/contents/approximate_counting/res/hist_plot.gp @@ -0,0 +1,31 @@ +set terminal epslatex standalone color + +#set output "hist_1.tex" +#set output "hist_2.tex" +set output "hist_3.tex" + +set size square + +#set title "True count of 10,000" +#set title "True count of 500,000" +set title "True count of 1,000,000" + +#set xlabel '' +#set xrange [0:40000] +#set xtics ("0" 0, "20,000" 20000, "40,000" 40000) + +#set xrange [352000:644000] +#set xrange[10000:310000] +#set xtics ("350,000" 10000, "500,000" 160000, "650,000" 310000) + +#set xrange [808000:1240000] +set 
xrange[-10000:490000] +set xtics ("750,0000" -10000, "1,000,000" 240000, "1,250,000" 490000) + +#set ylabel 'Approximate count $\left( \times 10^{5} \right)$' +#set ytics ("0" 0, "2" 200000, "4" 400000, "6" 600000, "8" 800000, "10" 1000000) + +#plot "histogram_1.dat" w l lw 10 title "" +#plot "histogram_2.dat" w l lw 10 title "" +plot "histogram_3.dat" w l lw 10 title "" + diff --git a/contents/approximate_counting/res/histexp_1.png b/contents/approximate_counting/res/histexp_1.png new file mode 100644 index 000000000..3295a1d03 Binary files /dev/null and b/contents/approximate_counting/res/histexp_1.png differ diff --git a/contents/approximate_counting/res/histexp_2.png b/contents/approximate_counting/res/histexp_2.png new file mode 100644 index 000000000..176663701 Binary files /dev/null and b/contents/approximate_counting/res/histexp_2.png differ diff --git a/contents/approximate_counting/res/histexp_3.png b/contents/approximate_counting/res/histexp_3.png new file mode 100644 index 000000000..0b6fc5aef Binary files /dev/null and b/contents/approximate_counting/res/histexp_3.png differ diff --git a/contents/approximate_counting/res/histograms.png b/contents/approximate_counting/res/histograms.png new file mode 100644 index 000000000..d10b733ed Binary files /dev/null and b/contents/approximate_counting/res/histograms.png differ diff --git a/contents/approximate_counting/res/plot.gp b/contents/approximate_counting/res/plot.gp new file mode 100644 index 000000000..5d2f0b352 --- /dev/null +++ b/contents/approximate_counting/res/plot.gp @@ -0,0 +1,19 @@ +set terminal epslatex standalone color +#set terminal pdf enhanced +#set style fill transparent solid 0.3 + +#set output "check.tex" +set output "checkexp.tex" +#set size square + +set title "Approximate counting of 1,000,000 items" + +set xlabel 'True number of items $\left( \times 10^{5} \right)$' +set xtics ("0" 0, "2" 200000, "4" 400000, "6" 600000, "8" 800000, "10" 1000000) + +set ylabel 'Approximate count 
$\left( \times 10^{5} \right)$' +set ytics ("0" 0, "2" 200000, "4" 400000, "6" 600000, "8" 800000, "10" 1000000, "12" 1200000, "14" 1400000) + +#plot "extremes.dat" u 0:1:2 w filledcu lc "gray" title "", for [i=1:10] "out.dat" u 0:i w l lw 3 title "", x w l lw 3 dt 3 lc "black" title "" +plot "extremesexp.dat" u 0:1:2 w filledcu lc "gray" title "", for [i=1:10] "outexp.dat" u 0:i w l lw 3 title "", x w l lw 3 dt 3 lc "black" title "" + diff --git a/contents/approximate_counting/res/plot_a.gp b/contents/approximate_counting/res/plot_a.gp new file mode 100644 index 000000000..290c4c5fe --- /dev/null +++ b/contents/approximate_counting/res/plot_a.gp @@ -0,0 +1,15 @@ +set terminal epslatex standalone color + +set output "a_change.tex" +#set size square + +set title '$n_v$ vs $a$ for $v=255$' + +set xlabel '$a$' +set xtics ("25" 0, "27.5" 5, "30" 10, "32.5" 15, "35" 20) + +set ylabel '$n_v (\times 10^5)$' +set ytics ("0" 0, "1" 100000, "2" 200000, "3" 300000, "4" 400000, "5" 500000) + +plot for [i=1:10] "a_change.dat" w l lw 3 title "" + diff --git a/contents/approximate_counting/res/plot_deltas.gp b/contents/approximate_counting/res/plot_deltas.gp new file mode 100644 index 000000000..b6b045b1a --- /dev/null +++ b/contents/approximate_counting/res/plot_deltas.gp @@ -0,0 +1,14 @@ +set terminal epslatex standalone color + +set output "deltas.tex" +#set size square + +set title '$\Delta$ vs $v$' + +set xlabel '$v$' + +set ylabel '$\Delta$ (logscale)' +set logscale y + +plot for [i=1:10] "deltas.dat" w l lw 3 title "" + diff --git a/contents/backward_euler_method/backward_euler_method.md b/contents/backward_euler_method/backward_euler_method.md index ba3d72ba2..090ffb191 100644 --- a/contents/backward_euler_method/backward_euler_method.md +++ b/contents/backward_euler_method/backward_euler_method.md @@ -13,7 +13,7 @@ MathJax.Hub.Queue(["Typeset",MathJax.Hub]); ##### Code Examples -The code examples are licensed under the MIT license (found in 
[LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). ##### Text diff --git a/contents/barnes_hut_algorithm/barnes_hut_algorithm.md b/contents/barnes_hut_algorithm/barnes_hut_algorithm.md index f4d4301b5..83c6f6003 100644 --- a/contents/barnes_hut_algorithm/barnes_hut_algorithm.md +++ b/contents/barnes_hut_algorithm/barnes_hut_algorithm.md @@ -16,7 +16,7 @@ MathJax.Hub.Queue(["Typeset",MathJax.Hub]); ##### Code Examples -The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). ##### Text diff --git a/contents/barnsley/barnsley.md b/contents/barnsley/barnsley.md new file mode 100644 index 000000000..3f8754ab2 --- /dev/null +++ b/contents/barnsley/barnsley.md @@ -0,0 +1,178 @@ +# The Barnsley Fern + +At the end of the chapter on [Iterated Function Systems](../IFS/IFS.md), we introduced two separate attractors: the Sierpinski triangle, and a uniform two-dimensional square, shown below with their corresponding Hutchinson operator. + +| Hutchinson Operator | Attractor | +| ------------------- | --------- | +| $$\begin{align} f_1(P) &= \frac{P+A}{2} \\ f_2(P) &= \frac{P+B}{2} \\ f_3(P) &= \frac{P+C}{2} \end{align}$$ | Sierpinsky Triangle Chaos Game | +| $$\begin{align} f_1(P) &= \frac{P+A}{2} \\ f_2(P) &= \frac{P+B}{2} \\ f_3(P) &= \frac{P+C}{2} \\ f_4(P) &= \frac{P+D}{2} \end{align}$$ | Square Chaos Game | + +As a reminder, the Hutchinson operator is a set of functions that act on a point in space, $$P$$, and return another another point at a new location. 
+These functions are meant to be used over and over again in some fashion, and as you continually iterate through them, some shape will eventually be drawn. +This shape is known as an attractor, and the entire system is called an *iterated function system* due to the iterative nature of drawing the attractor. + +In these cases, each function will move the point to be halfway between its original position and the position of $$A$$, $$B$$, $$C$$, and $$D$$ for $$f_1$$, $$f_2$$, $$f_3$$, and $$f_4$$, respectively. +Even though $$f_1$$, $$f_2$$, and $$f_3$$ are the same for both attractors, the addition of $$f_4$$ drastically changes the final result! +It is surprising that two seemingly identical sets of functions can look so different in the end, and this leads us to a somewhat challenging question: given a set of functions, is there any way to predict what the attractor will be without iterating through the functions? + +In general, the answer is no. +You *must* sample the function set in some fashion to find the resulting attractor. + +This feels somewhat unsettling to me. +After all, each individual function is simple, so why is the result so difficult to predict? 
+In this chapter, I hope to provide a slightly more satisfying answer by introducing another iterated function system with a beautiful attractor, known as the Barnsley fern {{ "barnsley2014fractals" | cite }}: + +| Hutchinson Operator | Attractor | +| ------------------- | --------- | +| $$\begin{align} f_1(P) &= \begin{bmatrix} 0 &0 \\ 0 &0.16 \end{bmatrix}P + \begin{bmatrix} 0 \\ 0 \end{bmatrix} \\ f_2(P) &= \begin{bmatrix} 0.85 &0.04 \\ -0.04 &0.85 \end{bmatrix}P + \begin{bmatrix} 0 \\ 1.6 \end{bmatrix} \\ f_3(P) &= \begin{bmatrix} 0.2 &-0.26 \\ 0.23 &0.22 \end{bmatrix}P + \begin{bmatrix} 0 \\ 1.6 \end{bmatrix} \\ f_4(P) &= \begin{bmatrix} -0.15 &0.28 \\ 0.26 &0.24 \end{bmatrix}P + \begin{bmatrix} 0 \\ 0.44 \end{bmatrix} \end{align}$$ | Barnsley Chaos Game | + +At first glance, this set of functions looks like an incomprehensible mess of magic numbers to create a specific result, and in a sense, that is precisely correct. +That said, we will go through each function and explain how it works, while also providing a simple chaos game implementation in code. +By the end of this chapter, we do not hope to provide a general strategy for understanding all iterated function systems, but we hope to at least make this one set of functions a bit more understandable. + +## Individual affine transforms + +The first thing to note about the Barnsley set of functions is that each one is an [affine transformation](../affine_transformations/affine_transformations.md). +Though it is not a hard rule, most iterated function systems use affine transforms, so this notation is common. 
+In fact, the Sierpinski operators can also be written in an affine form: + +| Non-affine | Affine | +| ---------- | ------ | +| $$\begin{align} f_1(P) &= \frac{P+A}{2} \\ f_2(P) &= \frac{P+B}{2} \\ f_3(P) &= \frac{P+C}{2} \end{align}$$ | $$\begin{align} f_1(P) &= \begin{bmatrix} 0.5 &0 \\ 0 &0.5 \end{bmatrix}P + \frac{A}{2} \\ f_2(P) &= \begin{bmatrix} 0.5 &0 \\ 0 &0.5 \end{bmatrix}P + \frac{B}{2} \\ f_3(P) &= \begin{bmatrix} 0.5 &0 \\ 0 &0.5 \end{bmatrix}P + \frac{C}{2} \end{align}$$ | + +The affine variant performs the same operation by scaling the $$x$$ and $$y$$ component of $$P$$ by $$0.5$$ and then adding half of either $$A$$, $$B$$, or $$C$$ for $$f_1$$, $$f_2$$, or $$f_3$$, respectively. +Each of these transforms involves some linear component (scaling or shearing) with an additional translation. + +As an important side-note: in both the Barnsley and Sierpinski function systems, the coefficients of the transformation matrix are all less than 1. +This property is known as *contractivity*, and an iterated function system can only have an attractor if the system is contractive. +Upon reflection, this makes sense. +If the matrix elements were greater than 1, the point could tend towards infinity after successive iterations of the function. + +Now let's hop into dissecting the Barnsley fern by seeing how each transform affects a random distribution of points: + +| Function | Operation | +| -------- | --------- | +| $$f_1(P) = \begin{bmatrix} 0 &0 \\ 0 &0.16 \end{bmatrix}P + \begin{bmatrix} 0 \\ 0 \end{bmatrix}$$

This operation moves every point to a single line. |

| +| $$f_2(P) = \begin{bmatrix} 0.85 &0.04 \\ -0.04 &0.85 \end{bmatrix}P + \begin{bmatrix} 0 \\ 1.6 \end{bmatrix}$$

This operation moves every point up and to the right. |

| +| $$f_3(P) = \begin{bmatrix} 0.2 &-0.26 \\ 0.23 &0.22 \end{bmatrix}P + \begin{bmatrix} 0 \\ 1.6 \end{bmatrix}$$

This operation rotates every point to the left. |

| +| $$f_4(P) = \begin{bmatrix} -0.15 &0.28 \\ 0.26 &0.24 \end{bmatrix}P + \begin{bmatrix} 0 \\ 0.44 \end{bmatrix}$$

This operation flips every point and rotates to the right.|

| + +At this stage, it *might* be clear what is going on, but it's not exactly obvious. +Essentially, each operation corresponds to another part of the fern: + +* $$f_1$$ creates the stem. +* $$f_2$$ creates successively smaller ferns moving up and to the right. +* $$f_3$$ creates the leaves on the right. +* $$f_4$$ creates the leaves on the left. + +The easiest way to make sense of this is to show the operations on the Barnsley fern, itself, instead of a random distribution of points. + +| Function | Operation | +| -------- | --------- | +| $$f_1(P) = \begin{bmatrix} 0 &0 \\ 0 &0.16 \end{bmatrix}P + \begin{bmatrix} 0 \\ 0 \end{bmatrix}$$ |
| +| $$f_2(P) = \begin{bmatrix} 0.85 &0.04 \\ -0.04 &0.85 \end{bmatrix}P + \begin{bmatrix} 0 \\ 1.6 \end{bmatrix}$$ |
| +| $$f_3(P) = \begin{bmatrix} 0.2 &-0.26 \\ 0.23 &0.22 \end{bmatrix}P + \begin{bmatrix} 0 \\ 1.6 \end{bmatrix}$$ |
| +| $$f_4(P) = \begin{bmatrix} -0.15 &0.28 \\ 0.26 &0.24 \end{bmatrix}P + \begin{bmatrix} 0 \\ 0.44 \end{bmatrix}$$ |
| + +Here, the self-similar nature of the fern becomes apparent. +Each operation is effectively moving a point on one part of the fern to a point on another part of the fern. + +In the final construction, it is clear that fewer points are necessary on some parts than others. +The stem, for example, does not need many points at all. +Meanwhile, the bulk of the fern seems to be generated by $$f_2$$, so we probably want the majority of the points to choose that function when iterating through he set. +To account for this, each function is also given a probability of being chosen: + +| Function | Probability | +| -------- | ----------- | +| $$f_1(P) = \begin{bmatrix} 0 &0 \\ 0 &0.16 \end{bmatrix}P + \begin{bmatrix} 0 \\ 0 \end{bmatrix}$$ | 0.01 | +| $$f_2(P) = \begin{bmatrix} 0.85 &0.04 \\ -0.04 &0.85 \end{bmatrix}P + \begin{bmatrix} 0 \\ 1.6 \end{bmatrix}$$ | 0.85 | +| $$f_3(P) = \begin{bmatrix} 0.2 &-0.26 \\ 0.23 &0.22 \end{bmatrix}P + \begin{bmatrix} 0 \\ 1.6 \end{bmatrix}$$ | 0.07 | +| $$f_4(P) = \begin{bmatrix} -0.15 &0.28 \\ 0.26 &0.24 \end{bmatrix}P + \begin{bmatrix} 0 \\ 0.44 \end{bmatrix}$$ | 0.07 | + +## Playing around a bit... + +One big advantage of using affine transformations to construct an attractor is that mathematicians and programmers can leverage their knowledge of how these transformations work to also modify the resulting image. +Here are a few examples of ferns that can be generated by modifying constituent functions: + +| Function | Operation | +| -------- | --------- | +| $$f_1(P) = \begin{bmatrix} \tau &0 \\ 0 &0.16 \end{bmatrix}P + \begin{bmatrix} 0 \\ 0 \end{bmatrix}$$

where $$-0.5 < \tau < 0.5 $$

Turning stems to leaves |

| +| $$f_2(P) = \begin{bmatrix} 0.85 & \tau \\ -0.04 &0.85 \end{bmatrix}P + \begin{bmatrix} 0 \\ 1.6 \end{bmatrix}$$

where $$ -0.01 < \tau < 0.09 $$

Changing fern tilt |

| +| $$f_3(P) = \begin{bmatrix} 0.2 &-0.26 \\ 0.23 &0.22 \end{bmatrix}P + \begin{bmatrix} \tau \\ 1.6 \end{bmatrix}$$

where $$-0.5 < \tau < 0.5$$

Plucking left leaves |

| +| $$f_4(P) = \begin{bmatrix} -0.15 &0.28 \\ 0.26 &0.24 \end{bmatrix}P + \begin{bmatrix} \tau \\ 0.44 \end{bmatrix}$$

where $$-0.5 < \tau < 0.5$$

Plucking right leaves |

| + +As an important note: the idea of modifying a resulting image by twiddling the knobs of an affine transform is the heart of many interesting methods, including fractal image compression where a low resolution version of an image is stored along with a reconstructing function set to generate high-quality images on-the-fly {{ "fractal-compression" | cite }}{{ "saupe1994review" | cite }}. +If this seems mystifying, don't worry! +We'll definitely come back to this soon, I just wanted to briefly mention it now so it's on everyone's mind as we move forward. + + +## Video Explanation + +Here is a video describing the Barnsley fern: + +
+ +
+ +## Example Code + +Similar to the chapter on [iterated function systems](../IFS/IFS.md), the example code here will show a chaos game for the construction of an attractor; +however, in this case the attractor will be the Barnsley fern instead of the Sierpinski triangle. +The biggest differences between the two code implementations is that the Barnsley implementation must take into account the varying probabilities for choosing each function path and that we will be choosing an initial point that is *on* the attractor this time (namely $$(0,0)$$). + +{% method %} +{% sample lang="jl" %} +[import, lang:"julia"](code/julia/barnsley.jl) +{% sample lang="rs" %} +[import, lang:"rust"](code/rust/barnsley.rs) +{% sample lang="cpp" %} +[import, lang:"cpp"](code/cpp/barnsley.cpp) +{% sample lang="c" %} +[import, lang:"c"](code/c/barnsley.c) +{% sample lang="java" %} +[import, lang:"java"](code/java/Barnsley.java) +{% sample lang="coco" %} +[import, lang:"coconut"](code/coconut/barnsley.coco) +{% sample lang="hs" %} +[import, lang:"haskell"](code/haskell/Barnsley.hs) +{% endmethod %} + +### Bibliography + +{% references %} {% endreferences %} + + + +## License + +##### Code Examples + +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). + +##### Text + +The text of this chapter was written by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +[

](https://creativecommons.org/licenses/by-sa/4.0/) + +#### Images/Graphics + +- The image "[IFS triangle 1](../IFS/res/IFS_triangle_1.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[IFS square 3](../IFS/res/IFS_square_3.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Simple Barnsley fern](res/full_fern.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Affine random transform 0](res/affine_rnd_0.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Affine random transform 1](res/affine_rnd_1.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Affine random transform 2](res/affine_rnd_2.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Affine random transform 3](res/affine_rnd_3.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). 
+- The video "[Affine fern transform 0](res/affine_fern_0.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Affine fern transform 1](res/affine_fern_1.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Affine fern transform 2](res/affine_fern_2.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Affine fern transform 3](res/affine_fern_3.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Fern twiddle 0](res/fern_twiddle_0.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Fern twiddle 1](res/fern_twiddle_1.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Fern twiddle 2](res/fern_twiddle_2.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). 
+- The video "[Fern twiddle 3](res/fern_twiddle_3.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). diff --git a/contents/barnsley/code/c/barnsley.c b/contents/barnsley/code/c/barnsley.c new file mode 100644 index 000000000..db154b777 --- /dev/null +++ b/contents/barnsley/code/c/barnsley.c @@ -0,0 +1,109 @@ +#include +#include + +struct matrix { + double xx, xy, xz, + yx, yy, yz, + zx, zy, zz; +}; + +struct point2d { + double x, y; +}; + +struct point3d { + double x, y, z; +}; + +struct point3d matmul(struct matrix mat, struct point3d point) +{ + struct point3d out = { + mat.xx * point.x + mat.xy * point.y + mat.xz * point.z, + mat.yx * point.x + mat.yy * point.y + mat.yz * point.z, + mat.zx * point.x + mat.zy * point.y + mat.zz * point.z + }; + return out; +} + +// This function reads in the Hutchinson operator and corresponding +// probabilities and returns a randomly selected transform +// This works by choosing a random number and then iterating through all +// probabilities until it finds an appropriate bin +struct matrix select_array(struct matrix *hutchinson_op, double *probabilities, + size_t num_op) +{ + // random number to be binned + double rnd = (double)rand() / RAND_MAX; + + // This checks to see if a random number is in a bin, if not, that + // probability is subtracted from the random number and we check the next + // bin in the list + for (size_t i = 0; i < num_op; ++i) { + if (rnd < probabilities[i]) { + return hutchinson_op[i]; + } + rnd -= probabilities[i]; + } + return hutchinson_op[0]; +} + +// This is a general function to simulate a chaos game +// - output_points: pointer to an initialized output array +// - num: the number of iterations +// - initial_point: the starting point of the chaos game +// - hutchinson_op: the set of functions to iterate through +// - probabilities: the 
set of probabilities corresponding to the likelihood +// of choosing their corresponding function in hutchingson_op +// - nop: the number of functions in hutchinson_op +void chaos_game(struct point2d *output_points, size_t num, + struct point2d initial_point, struct matrix *hutchinson_op, + double *probabilities, size_t nop) +{ + // extending point to 3D for affine transform + struct point3d point = {initial_point.x, initial_point.y, 1.0}; + + for (size_t i = 0; i < num; ++i) { + point = matmul(select_array(hutchinson_op, probabilities, nop), point); + output_points[i].x = point.x; + output_points[i].y = point.y; + } +} + +int main() +{ + struct matrix barnsley_hutchinson[4] = { + { + 0.0, 0.0, 0.0, + 0.0, 0.16, 0.0, + 0.0, 0.0, 1.0 + }, + { + 0.85, 0.04, 0.0, + -0.04, 0.85, 1.60, + 0.0, 0.0, 1.0 + }, + { + 0.2, -0.26, 0.0, + 0.23, 0.22, 1.60, + 0.0, 0.0, 1.0 + }, + { + -0.15, 0.28, 0.0, + 0.26, 0.24, 0.44, + 0.0, 0.0, 1.0 + } + }; + + double barnsley_probabilities[4] = {0.01, 0.85, 0.07, 0.07}; + struct point2d output_points[10000]; + struct point2d initial_point = {0.0, 0.0}; + chaos_game(output_points, 10000, initial_point, barnsley_hutchinson, + barnsley_probabilities, 4); + FILE *f = fopen("barnsley.dat", "w"); + for (size_t i = 0; i < 10000; ++i) { + fprintf(f, "%f\t%f\n", output_points[i].x, output_points[i].y); + } + fclose(f); + + return 0; +} diff --git a/contents/barnsley/code/coconut/barnsley.coco b/contents/barnsley/code/coconut/barnsley.coco new file mode 100644 index 000000000..1fd87c4d7 --- /dev/null +++ b/contents/barnsley/code/coconut/barnsley.coco @@ -0,0 +1,44 @@ +from random import choices +import numpy as np + +data Point(x=0, y=0): + def __rmatmul__(self, mat: np.array): + point_array = np.array([self.x, self.y, 1]) + x, y, *_ = tuple(*(mat @ point_array)) + return Point(x, y) + + +def chaos_game(Point(initial_location), hutchinson_op, probabilities): + point = initial_location + while True: + yield (point := choices(hutchinson_op, 
probabilities) @ point) + +barnsley_hutchinson = [ + np.array([ + [0., 0., 0.], + [0., 0.16, 0.], + [0., 0., 1.], + ]), + np.array([ + [0.85, 0.04, 0.], + [-0.04, 0.85, 1.6], + [0., 0., 1.], + ]), + np.array([ + [0.2, -0.26, 0.], + [0.23, 0.22, 1.6], + [0., 0., 1.], + ]), + np.array([ + [-0.15, 0.28, 0.], + [0.26, 0.24, 0.44], + [0., 0., 1.], + ]), +] + +barnsley_probabilities = [0.01, 0.85, 0.07, 0.07] + +if __name__ == '__main__': + output_gen = chaos_game(Point(0, 0), barnsley_hutchinson, barnsley_probabilities) + output_points = np.array([*output_gen$[:10000]]) + np.savetxt("out.dat", output_points) diff --git a/contents/barnsley/code/cpp/barnsley.cpp b/contents/barnsley/code/cpp/barnsley.cpp new file mode 100644 index 000000000..491d6bd51 --- /dev/null +++ b/contents/barnsley/code/cpp/barnsley.cpp @@ -0,0 +1,122 @@ +// The code bellow uses C++-17 features, compile it with C++-17 flags, e.g.: +// clang++ -Wall -Wextra -Wshadow -Wnon-virtual-dtor -Wold-style-cast -Wcast-align -Wunused -Woverloaded-virtual -Wpedantic -Wconversion -Wsign-conversion -Wnull-dereference -Wdouble-promotion -Wformat=2 -gdwarf-3 -D_GLIBCXX_DEBUG -std=c++17 -O3 -c ./barnsley.cpp barnsley + +#include +#include +#include +#include + +using Vec2 = std::array; +using Vec3 = std::array; +using Row = std::array; +using Op = std::array; + +constexpr auto OpN = 4U; + +template +auto operator+(std::array x, std::array y) { + for (auto i = 0U; i < N; ++i) + x[i] += y[i]; + return x; +} + +template +auto operator*(double k, std::array v) { + for (auto i = 0U; i < N; ++i) + v[i] *= k; + return v; +} + +template +auto operator*(std::array v, double k) { + return k * v; +} + +auto operator*(const Op& x, const Vec3& y) { + auto ret = Vec3{}; + for (auto i = 0U; i < 3U; ++i) { + ret[i] = 0; + for (auto j = 0U; j < 3U; ++j) + ret[i] += y[j] * x[i][j]; + } + return ret; +} + +// Returns a pseudo-random number generator +std::default_random_engine& rng() { + // Initialize static pseudo-random engine with 
non-deterministic random seed + static std::default_random_engine randEngine(std::random_device{}()); + return randEngine; +} + +// Returns a random double in [0, 1) +double drand() { + return std::uniform_real_distribution(0.0, 1.0)(rng()); +} + +// This is a function that reads in the Hutchinson operator and +// corresponding +// probabilities and outputs a randomly selected transform +// This works by choosing a random number and then iterating through all +// probabilities until it finds an appropriate bin +auto select_array( + const std::array& hutchinson_op, + const std::array& probabilities) { + + // random number to be binned + auto rnd = drand(); + + // This checks to see if a random number is in a bin, if not, that + // probability is subtracted from the random number and we check the + // next bin in the list + for (auto i = 0U; i < probabilities.size(); ++i) { + if (rnd < probabilities[i]) + return hutchinson_op[i]; + rnd -= probabilities[i]; + } + assert(!static_cast("check if probabilities adding up to 1")); + return hutchinson_op[0]; +} + +// This is a general function to simulate a chaos game +// n is the number of iterations +// initial_location is the the starting point of the chaos game +// hutchinson_op is the set of functions to iterate through +// probabilities is the set of probabilities corresponding to the likelihood +// of choosing their corresponding function in hutchinson_op +auto chaos_game( + size_t n, + Vec2 initial_location, + const std::array& hutchinson_op, + const std::array& probabilities) { + + // Initializing the output array and the initial point + auto output_points = std::vector{}; + + // extending point to 3D for affine transform + auto point = Vec3{initial_location[0], initial_location[1], 1}; + + for (auto i = 0U; i < n; ++i) { + output_points.push_back(Vec2{point[0], point[1]}); + point = select_array(hutchinson_op, probabilities) * point; + } + + return output_points; +} + +int main() { + + const std::array 
barnsley_hutchinson = { + Op{Row{0.0, 0.0, 0.0}, Row{0.0, 0.16, 0.0}, Row{0.0, 0.0, 1.0}}, + Op{Row{0.85, 0.04, 0.0}, Row{-0.04, 0.85, 1.60}, Row{0.0, 0.0, 1.0}}, + Op{Row{0.20, -0.26, 0.0}, Row{0.23, 0.22, 1.60}, Row{0.0, 0.0, 1.0}}, + Op{Row{-0.15, 0.28, 0.0}, Row{0.26, 0.24, 0.44}, Row{0.0, 0.0, 1.0}}}; + + const std::array barnsley_probabilities = {0.01, 0.85, 0.07, 0.07}; + auto output_points = chaos_game( + 10'000, Vec2{0, 0}, barnsley_hutchinson, barnsley_probabilities); + + std::ofstream ofs("out.dat"); + for (auto pt : output_points) + ofs << pt[0] << '\t' << pt[1] << '\n'; +} diff --git a/contents/barnsley/code/haskell/Barnsley.hs b/contents/barnsley/code/haskell/Barnsley.hs new file mode 100644 index 000000000..bf7024200 --- /dev/null +++ b/contents/barnsley/code/haskell/Barnsley.hs @@ -0,0 +1,40 @@ +import Data.Array (Array, bounds, elems, listArray, (!)) +import Data.List (intercalate) +import System.Random + +data Point = Point Double Double + +chaosGame :: RandomGen g => g -> Int -> Array Int (Double, (Point -> Point)) -> [Point] +chaosGame g n hutchinson = take n points + where + (x, g') = random g + (y, g'') = random g' + + cumulProbabilities = scanl1 (+) $ map fst $ elems hutchinson + to_choice x = length $ takeWhile (x >) cumulProbabilities + + picks = map to_choice $ randomRs (0, 1) g'' + step = fmap snd hutchinson + + points = Point x y : zipWith (step !) 
picks points + +affine :: (Double, Double, Double, Double) -> (Double, Double) -> Point -> Point +affine (xx, xy, yx, yy) (a, b) (Point x y) = Point (a + xx * x + xy * y) (b + yx * x + yy * y) + +showPoint :: Point -> String +showPoint (Point x y) = show x ++ "\t" ++ show y + +main :: IO () +main = do + g <- newStdGen + let barnsley = + listArray + (0, 3) + [ (0.01, affine (0, 0, 0, 0.16) (0, 0)), + (0.85, affine (0.85, 0.04, -0.04, 0.85) (0, 1.6)), + (0.07, affine (0.2, -0.26, 0.23, 0.22) (0, 1.6)), + (0.07, affine (-0.15, 0.28, 0.26, 0.24) (0, 0.44)) + ] + points = chaosGame g 100000 barnsley + + writeFile "out.dat" $ intercalate "\n" $ map showPoint points diff --git a/contents/barnsley/code/java/Barnsley.java b/contents/barnsley/code/java/Barnsley.java new file mode 100644 index 000000000..e18843ee2 --- /dev/null +++ b/contents/barnsley/code/java/Barnsley.java @@ -0,0 +1,98 @@ +import java.io.FileWriter; +import java.io.IOException; +import java.util.Random; + +public class Barnsley { + + private static class Point { + public double x, y, z; + + public Point(double x, double y, double z) { + this.x = x; + this.y = y; + this.z = z; + } + + public Point(double[] coordinates) { + this.x = coordinates[0]; + this.y = coordinates[1]; + this.z = coordinates[2]; + } + + public Point matrixMultiplication(double[][] matrix) { + double[] results = new double[3]; + for (int i = 0; i < 3; i++) { + results[i] = matrix[i][0] * x + matrix[i][1] * y + matrix[i][2] * z; + } + return new Point(results); + } + } + + // This is a function that reads in the Hutchinson operator and corresponding + // probabilities and outputs a randomly selected transform + // This works by choosing a random number and then iterating through all + // probabilities until it finds an appropriate bin + public static double[][] selectArray(double[][][] hutchinsonOp, double[] probabilities) { + Random rng = new Random(); + // Random number to be binned + double rand = rng.nextDouble(); + + // This checks 
to see if a random number is in a bin, if not, that + // probability is subtracted from the random number and we check the + // next bin in the list + for (int i = 0; i < probabilities.length; i++) { + if (rand < probabilities[i]) + return hutchinsonOp[i]; + rand -= probabilities[i]; + } + // This return will never be reached, as the loop above ensures that at some point rand will be smaller + // than a probability. However, Java does not know this and thus this return is needed for compilation. + return null; + } + + // This is a general function to simulate a chaos game + // n is the number of iterations + // initialLocation is the starting point of the chaos game + // hutchinsonOp is the set of functions to iterate through + // probabilities is the set of probabilities corresponding to the likelihood + // of choosing their corresponding function in hutchinsonOp + public static Point[] chaosGame(int n, Point initialLocation, double[][][] hutchinsonOp, double[] probabilities) { + // Initializing output points + Point[] outputPoints = new Point[n]; + Point point = initialLocation; + + for (int i = 0; i < n; i++) { + outputPoints[i] = point; + point = point.matrixMultiplication(selectArray(hutchinsonOp, probabilities)); + } + + return outputPoints; + } + + public static void main(String[] args) { + double[][][] barnsleyHutchinson = { + {{0.0, 0.0, 0.0}, + {0.0, 0.16, 0.0}, + {0.0, 0.0, 1.0}}, + {{0.85, 0.04, 0.0}, + {-0.04, 0.85, 1.60}, + {0.0, 0.0, 1.0}}, + {{0.20, -0.26, 0.0}, + {0.23, 0.22, 1.60}, + {0.0, 0.0, 1.0}}, + {{-0.15, 0.28, 0.0}, + {0.26, 0.24, 0.44}, + {0.0, 0.0, 1.0}} + }; + double[] barnsleyProbabilities = new double[]{0.01, 0.85, 0.07, 0.07}; + Point[] outputPoints = chaosGame(10000, new Point(0.0, 0.0, 1.0), barnsleyHutchinson, barnsleyProbabilities); + try (FileWriter fw = new FileWriter("barnsley.dat")) { + for (Point p : outputPoints) { + fw.write(p.x + "\t" + p.y + "\n"); + } + } catch (IOException e) { + e.printStackTrace(); + } + } + +} diff 
--git a/contents/barnsley/code/julia/barnsley.jl b/contents/barnsley/code/julia/barnsley.jl new file mode 100644 index 000000000..779d7a13a --- /dev/null +++ b/contents/barnsley/code/julia/barnsley.jl @@ -0,0 +1,62 @@ +using DelimitedFiles + +# This is a function that reads in the Hutchinson operator and corresponding +# probabilities and outputs a randomly selected transform +# This works by choosing a random number and then iterating through all +# probabilities until it finds an appropriate bin +function select_array(hutchinson_op, probabilities) + + # random number to be binned + rnd = rand() + + # This checks to see if a random number is in a bin, if not, that + # probability is subtracted from the random number and we check the + # next bin in the list + for i = 1:length(probabilities) + if (rnd < probabilities[i]) + return hutchinson_op[i] + end + rnd -= probabilities[i] + end +end + +# This is a general function to simulate a chaos game +# n is the number of iterations +# initial_location is the starting point of the chaos game +# hutchinson_op is the set of functions to iterate through +# probabilities is the set of probabilities corresponding to the likelihood +# of choosing their corresponding function in hutchinson_op +function chaos_game(n::Int, initial_location, hutchinson_op, probabilities) + + # Initializing the output array and the initial point + output_points = zeros(n,2) + + # extending point to 3D for affine transform + point = [initial_location[1], initial_location[2], 1] + + for i = 1:n + output_points[i,:] .= point[1:2] + point = select_array(hutchinson_op, probabilities)*point + end + + return output_points + +end + +barnsley_hutchinson = [[0.0 0.0 0.0; + 0.0 0.16 0.0; + 0.0 0.0 1.0], + [0.85 0.04 0.0; + -0.04 0.85 1.60; + 0.0 0.0 1.0], + [0.20 -0.26 0.0; + 0.23 0.22 1.60; + 0.0 0.0 1.0], + [-0.15 0.28 0.0; + 0.26 0.24 0.44; + 0.0 0.0 1.0]] + +barnsley_probabilities = [0.01, 0.85, 0.07, 0.07] +output_points = chaos_game(10000, [0,0], + 
barnsley_hutchinson, barnsley_probabilities) +writedlm("out.dat", output_points) diff --git a/contents/barnsley/code/rust/Cargo.toml b/contents/barnsley/code/rust/Cargo.toml new file mode 100644 index 000000000..52505634b --- /dev/null +++ b/contents/barnsley/code/rust/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "barnsley" +version = "0.1.0" +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +rand = "0.8.4" + +[[bin]] +path = "./barnsley.rs" +name = "main" \ No newline at end of file diff --git a/contents/barnsley/code/rust/barnsley.rs b/contents/barnsley/code/rust/barnsley.rs new file mode 100644 index 000000000..6fee75375 --- /dev/null +++ b/contents/barnsley/code/rust/barnsley.rs @@ -0,0 +1,105 @@ +use rand::prelude::*; +#[derive(Clone, Copy)] +struct Point2 { + x: f64, + y: f64, +} + +#[derive(Clone, Copy)] +struct Point3 { + x: f64, + y: f64, + z: f64, +} + +impl Point3 { + fn new(x: f64, y: f64, z: f64) -> Self { + Self { x, y, z } + } + + fn matrix_mul(self, rhs: Vec) -> Self { + let x = rhs[0].x * self.x + rhs[0].y * self.y + rhs[0].z * self.z; + let y = rhs[1].x * self.x + rhs[1].y * self.y + rhs[1].z * self.z; + let z = rhs[2].x * self.x + rhs[2].y * self.y + rhs[2].z * self.z; + Self::new(x, y, z) + } +} + +fn select_array(hutchinson_op: &[Vec], probabilities: &[f64]) -> Vec { + let mut rng = rand::thread_rng(); + let mut rnd = rng.gen::(); + + for (i, probability) in probabilities.iter().enumerate() { + if rnd < *probability { + return hutchinson_op[i].clone(); + } + rnd -= probability; + } + + return vec![]; +} + +fn chaos_game( + iters: usize, + initial_location: Point2, + hutchinson_op: &[Vec], + probabilities: &[f64], +) -> Vec { + let mut point = Point3 { + x: initial_location.x, + y: initial_location.y, + z: 1.0, + }; + (0..iters) + .into_iter() + .map(|_| { + let old_point = point; + let operation = select_array(hutchinson_op, probabilities); + point = 
point.matrix_mul(operation); + Point2 { + x: old_point.x, + y: old_point.y, + } + }) + .collect() +} + +fn main() { + let barnsley_hutchinson = vec![ + vec![ + Point3::new(0.0, 0.0, 0.0), + Point3::new(0.0, 0.16, 0.0), + Point3::new(0.0, 0.0, 1.0), + ], + vec![ + Point3::new(0.85, 0.04, 0.0), + Point3::new(-0.04, 0.85, 1.60), + Point3::new(0.0, 0.0, 1.0), + ], + vec![ + Point3::new(0.20, -0.26, 0.0), + Point3::new(0.23, 0.22, 1.60), + Point3::new(0.0, 0.0, 1.0), + ], + vec![ + Point3::new(-0.15, 0.28, 0.0), + Point3::new(0.26, 0.24, 0.44), + Point3::new(0.0, 0.0, 1.0), + ], + ]; + + let barnsley_probabilities = vec![0.01, 0.85, 0.07, 0.07]; + + let mut out = String::new(); + + for point in chaos_game( + 10_000, + Point2 { x: 0.0, y: 0.0 }, + &barnsley_hutchinson, + &barnsley_probabilities, + ) { + out += format!("{}\t{}\n", point.x, point.y).as_str(); + } + + std::fs::write("./out.dat", out).unwrap(); +} diff --git a/contents/barnsley/res/affine_fern_0.mp4 b/contents/barnsley/res/affine_fern_0.mp4 new file mode 100644 index 000000000..024311faf Binary files /dev/null and b/contents/barnsley/res/affine_fern_0.mp4 differ diff --git a/contents/barnsley/res/affine_fern_1.mp4 b/contents/barnsley/res/affine_fern_1.mp4 new file mode 100644 index 000000000..cf8c101e0 Binary files /dev/null and b/contents/barnsley/res/affine_fern_1.mp4 differ diff --git a/contents/barnsley/res/affine_fern_2.mp4 b/contents/barnsley/res/affine_fern_2.mp4 new file mode 100644 index 000000000..b82b0bdeb Binary files /dev/null and b/contents/barnsley/res/affine_fern_2.mp4 differ diff --git a/contents/barnsley/res/affine_fern_3.mp4 b/contents/barnsley/res/affine_fern_3.mp4 new file mode 100644 index 000000000..def5adb76 Binary files /dev/null and b/contents/barnsley/res/affine_fern_3.mp4 differ diff --git a/contents/barnsley/res/affine_rnd_0.mp4 b/contents/barnsley/res/affine_rnd_0.mp4 new file mode 100644 index 000000000..52d28c16f Binary files /dev/null and 
b/contents/barnsley/res/affine_rnd_0.mp4 differ diff --git a/contents/barnsley/res/affine_rnd_1.mp4 b/contents/barnsley/res/affine_rnd_1.mp4 new file mode 100644 index 000000000..2c8cfe530 Binary files /dev/null and b/contents/barnsley/res/affine_rnd_1.mp4 differ diff --git a/contents/barnsley/res/affine_rnd_2.mp4 b/contents/barnsley/res/affine_rnd_2.mp4 new file mode 100644 index 000000000..8b258dd0b Binary files /dev/null and b/contents/barnsley/res/affine_rnd_2.mp4 differ diff --git a/contents/barnsley/res/affine_rnd_3.mp4 b/contents/barnsley/res/affine_rnd_3.mp4 new file mode 100644 index 000000000..4d8d50344 Binary files /dev/null and b/contents/barnsley/res/affine_rnd_3.mp4 differ diff --git a/contents/barnsley/res/fern_twiddle_0.mp4 b/contents/barnsley/res/fern_twiddle_0.mp4 new file mode 100644 index 000000000..dd96ec1b2 Binary files /dev/null and b/contents/barnsley/res/fern_twiddle_0.mp4 differ diff --git a/contents/barnsley/res/fern_twiddle_1.mp4 b/contents/barnsley/res/fern_twiddle_1.mp4 new file mode 100644 index 000000000..e80bc2fa9 Binary files /dev/null and b/contents/barnsley/res/fern_twiddle_1.mp4 differ diff --git a/contents/barnsley/res/fern_twiddle_2.mp4 b/contents/barnsley/res/fern_twiddle_2.mp4 new file mode 100644 index 000000000..46aa999f5 Binary files /dev/null and b/contents/barnsley/res/fern_twiddle_2.mp4 differ diff --git a/contents/barnsley/res/fern_twiddle_3.mp4 b/contents/barnsley/res/fern_twiddle_3.mp4 new file mode 100644 index 000000000..244da3843 Binary files /dev/null and b/contents/barnsley/res/fern_twiddle_3.mp4 differ diff --git a/contents/barnsley/res/full_fern.png b/contents/barnsley/res/full_fern.png new file mode 100644 index 000000000..26666ab60 Binary files /dev/null and b/contents/barnsley/res/full_fern.png differ diff --git a/contents/bitlogic/bitlogic.md b/contents/bitlogic/bitlogic.md index f336dd7d2..dc4517395 100644 --- a/contents/bitlogic/bitlogic.md +++ b/contents/bitlogic/bitlogic.md @@ -96,37 +96,37 @@ These 
operations are called *gates*, and follow somewhat straightforward logic. The *AND* gate, for example, reads in 2 bits and will only output a 1 value if both inputs are 1. This can be seen in the corresponding truth table:

- +

The *OR* gate will output 1 if either input bits are 1:

- +

The *exclusive OR* or *XOR* gate is the same as the *OR* gate, but will not output 1 if both bits are 1:

- +

The *NOT* gate simply flips the input bit:

- +

By combining the NOT and AND gates, we get the *NAND* gate:

- +

And NOT and OR create *NOR*:

- +

There are a few other gates, but this is enough for most things. We'll add more as the need arises! @@ -137,7 +137,7 @@ That's about it for bitlogic. I realize it was a bit long, but this is absolutel Here is a video describing the contents of this chapter:
- +
- -## License - -##### Code Examples - -The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). - -##### Text - -The text of this chapter was written by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). - -[

](https://creativecommons.org/licenses/by-sa/4.0/) - -##### Pull Requests - -After initial licensing ([#560](https://github.com/algorithm-archivists/algorithm-archive/pull/560)), the following pull requests have modified the text or graphics of this chapter: -- none diff --git a/contents/bogo_sort/code/asm-x64/bogo_sort.s b/contents/bogo_sort/code/asm-x64/bogo_sort.s deleted file mode 100644 index 3980f821b..000000000 --- a/contents/bogo_sort/code/asm-x64/bogo_sort.s +++ /dev/null @@ -1,147 +0,0 @@ -.intel_syntax noprefix - -.section .rodata - array: - .align 16 - .int 1, 3654, 78, 654, -234, -12, 4, 3, -6, -100 - .equ array_len, (.-array) / 4 - array_fmt: .string "%d " - lf: .string "\n" - unsorted: .string "Unsorted array: " - sorted: .string "Sorted array: " - -.section .text - .global main - .extern printf, rand, srand - -# rdi - array ptr -# rsi - array size -print_array: - push r12 - push r13 - mov r12, rdi # Loop variable - lea r13, [rdi + 4 * rsi] # Pointer after the last element -print_array_loop: - cmp r12, r13 # If we're done iterating over the array then bail - jge print_array_done - mov rdi, OFFSET array_fmt # Otherwise print the current value - mov esi, DWORD PTR [r12] - xor rax, rax - call printf - lea r12, [r12 + 4] # And increment the loop variable pointer - jmp print_array_loop -print_array_done: - mov rdi, OFFSET lf # Print a closing newline - xor rax, rax - call printf - pop r13 - pop r12 - ret - -# rdi - array ptr -# rsi - array size -# RET rax - boolean -is_sorted: - sub rsi, 1 # Getting array + n and *array + n - 1 - lea rcx, [rsi - 1] - lea rcx, [rdi + 4 * rcx] - lea rsi, [rdi + 4 * rsi] -is_sorted_loop: - cmp rsi, rdi # Check if array + n - 1 == array - je is_sorted_done - mov edx, DWORD PTR [rsi] # Load value to register - xor rax, rax # Set rax to 0 - cmp edx, DWORD PTR [rcx] # Check if array[n] < array[n - 1] - jl is_sorted_return - sub rcx, 4 # If not make pointers go to down an element - sub rsi, 4 - jmp is_sorted_loop -is_sorted_done: 
- mov rax, 1 # If sorted then set rax to 1 -is_sorted_return: - ret # Return - -# rdi - array ptr -# rsi - array size -shuffle: - push r12 - push r13 - push r14 - push r15 - mov r12, rdi # Save parameters - mov r13, rsi - xor r14, r14 -shuffle_loop: - cmp r14, r13 # Check if i == array size - je shuffle_done - mov r15d, DWORD PTR [r12 + r14 * 4] # Save array[i] - call rand # Swap a random element with array[i] - xor edx, edx - div r13 # Mod random number to keep in array - mov eax, DWORD PTR [r12 + rdx * 4] - mov DWORD PTR [r12 + r14 * 4], eax - mov DWORD PTR [r12 + rdx * 4], r15d - add r14, 1 # increment then repeat - jmp shuffle_loop -shuffle_done: - pop r15 - pop r14 - pop r13 - pop r12 - ret - -# rdi - array ptr -# rsi - array size -bogo_sort: - push r12 - push r13 - mov r12, rdi - mov r13, rsi -bogo_sort_loop: - mov rdi, r12 # Check if the array is sorted - mov rsi, r13 - call is_sorted - test rax, rax - jnz bogo_sort_done - mov rdi, r12 # If not then shuffle - mov rsi, r13 - call shuffle - jmp bogo_sort_loop -bogo_sort_done: - pop r13 - pop r12 - ret - -main: - # Set up our stack - sub rsp, 40 - # We load the array in chunks onto the stack - movaps xmm0, XMMWORD PTR [array] - movaps XMMWORD PTR [rsp], xmm0 - movaps xmm0, XMMWORD PTR [array + 16] - movaps XMMWORD PTR [rsp + 16], xmm0 - mov rax, QWORD PTR [array + 32] - mov QWORD PTR [rsp + 32], rax - # Print the unsorted array - mov rdi, OFFSET unsorted - xor rax, rax - call printf - mov rdi, rsp - mov rsi, array_len - call print_array - # Sort - mov rdi, rsp - mov rsi, array_len - call bogo_sort - # Print the sorted array - mov rdi, OFFSET sorted - xor rax, rax - call printf - mov rdi, rsp - mov rsi, array_len - call print_array - # Restore the stack pointer, set return value to 0 - add rsp, 40 - xor rax, rax - ret - diff --git a/contents/bogo_sort/code/bash/bogo_sort.bash b/contents/bogo_sort/code/bash/bogo_sort.bash deleted file mode 100755 index 43135075f..000000000 --- 
a/contents/bogo_sort/code/bash/bogo_sort.bash +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bash -is_sorted() { - local arr - local len - local i - local sorted - arr=("$@") - (( len = ${#arr[@]} - 1)) - (( sorted = 1 )) - - for (( i = len; i > 0; i-- )); do - if (( arr[i] < arr[(( i - 1 ))] )); then - (( sorted = 0 )) - break - fi - done - printf "%d" $sorted -} - -shuffle() { - local arr - local len - local i - local tmp - local rand - arr=("$@") - (( len = ${#arr[@]} )) - - for (( i = 0; i < len; i++ )); do - (( rand = RANDOM % len )) - (( tmp = arr[rand] )) - (( arr[rand] = arr[i] )) - (( arr[i] = tmp )) - done - echo ${arr[*]} -} - -bogo_sort() { - local arr - arr=("$@") - while [[ $(is_sorted "${arr[@]}") == 0 ]]; do - arr=($(shuffle "${arr[@]}")) - done - echo ${arr[*]} -} - -arr=(1 3654 78 654 -234 -12 4 3 -6 -100) -echo "Unsorted array: ${arr[*]}" -arr=("$(bogo_sort "${arr[@]}")") -echo "Sorted array: ${arr[*]}" diff --git a/contents/bogo_sort/code/c++/bogosort.cpp b/contents/bogo_sort/code/c++/bogosort.cpp deleted file mode 100644 index f4ce66934..000000000 --- a/contents/bogo_sort/code/c++/bogosort.cpp +++ /dev/null @@ -1,51 +0,0 @@ -#include -#include -#include -#include -#include - -using std::begin; -using std::end; - -template -std::vector generate_input(std::size_t size, Rng& rng) { - auto dist = std::uniform_real_distribution<>(0.0, 1.0); - - auto ret = std::vector(); - std::generate_n(std::back_inserter(ret), size, - [&rng, &dist] { return dist(rng); }); - - return ret; -} - -template -void print_range(std::ostream& os, Iter const first, Iter const last) { - os << '{'; - - if (first != last) { - os << *first; - std::for_each(first + 1, last, [&os] (double d) { os << ", " << d; }); - } - - os << "}\n"; -} - -template -void bogo_sort(Iter const first, Iter const last, Rng& rng) { - while (not std::is_sorted(first, last)) { - std::shuffle(first, last, rng); - } -} - -int main() { - std::random_device random_device; - auto rng = 
std::mt19937(random_device()); - - auto input = generate_input(5, rng); - - print_range(std::cout, begin(input), end(input)); - - bogo_sort(begin(input), end(input), rng); - - print_range(std::cout, begin(input), end(input)); -} diff --git a/contents/bogo_sort/code/c/bogo_sort.c b/contents/bogo_sort/code/c/bogo_sort.c deleted file mode 100644 index 0081a86a8..000000000 --- a/contents/bogo_sort/code/c/bogo_sort.c +++ /dev/null @@ -1,47 +0,0 @@ -#include -#include -#include -#include - -bool is_sorted(int *array, size_t n) { - while (--n >= 1) { - if (array[n] < array[n - 1]) { - return false; - } - } - - return true; -} - -void shuffle(int *array, size_t n) { - for (size_t i = 0; i < n; ++i) { - int t = array[i]; - int r = rand() % n; - array[i] = array[r]; - array[r] = t; - } -} - -void bogo_sort(int *array, size_t n) { - while (!is_sorted(array, n)) { - shuffle(array, n); - } -} - -int main() { - int array[10] = {1, 3654, 78, 654, -234, -12, 4, 3, -6, -100}; - - printf("Unsorted array:\n"); - for (int i = 0; i < 10; ++i) { - printf("%d ", array[i]); - } - printf("\n\n"); - - bogo_sort(array, 10); - - printf("Sorted array:\n"); - for (int i = 0; i < 10; ++i) { - printf("%d ", array[i]); - } - printf("\n"); -} diff --git a/contents/bogo_sort/code/clisp/bogo-sort.lisp b/contents/bogo_sort/code/clisp/bogo-sort.lisp deleted file mode 100644 index dae143bab..000000000 --- a/contents/bogo_sort/code/clisp/bogo-sort.lisp +++ /dev/null @@ -1,26 +0,0 @@ -;;;; Bogo sort implementation - -(defun sortedp (list) - "Checks if a list is sorted" - (if (< (length list) 2) - t - (if (<= (first list) (second list)) - (sortedp (rest list)) - nil))) - -(defun shuffle (list) - "Returns a shuffled list using the Fisher-Yates method" - (loop for i from (1- (length list)) downto 0 - do - (rotatef - (nth i list) - (nth (random (1+ i)) list)) - finally (return list))) - -(defun bogo-sort (list) - "Sorts a given list (eventually)" - (if (sortedp list) - list - (bogo-sort (shuffle list)))) - 
-(print (bogo-sort (list 1 3 2 4))) diff --git a/contents/bogo_sort/code/clojure/bogo.clj b/contents/bogo_sort/code/clojure/bogo.clj deleted file mode 100644 index d9f575251..000000000 --- a/contents/bogo_sort/code/clojure/bogo.clj +++ /dev/null @@ -1,11 +0,0 @@ -;; earthfail -(defn is-sorted [col func] - "return true of col is sorted in respect to func role - like <,>,<=,>=" - (apply func col)) - -(defn bogo-sort [col func] - "shuffle the collection untill it is sorted" - (if (is-sorted col func) - col - (shuffle col))) diff --git a/contents/bogo_sort/code/crystal/bogo.cr b/contents/bogo_sort/code/crystal/bogo.cr deleted file mode 100644 index 8d622d567..000000000 --- a/contents/bogo_sort/code/crystal/bogo.cr +++ /dev/null @@ -1,22 +0,0 @@ -def is_sorted?(a) - 0.upto(a.size - 2) do |i| - if a[i] > a[i + 1] - return false - end - end - true -end - -def bogo_sort!(a) - while !is_sorted?(a) - a.shuffle! - end -end - -def main - a = [1.0, 3.0, 2.0, 4.0] - bogo_sort!(a) - puts a -end - -main diff --git a/contents/bogo_sort/code/csharp/BogoSort.cs b/contents/bogo_sort/code/csharp/BogoSort.cs deleted file mode 100644 index 3d5e981c3..000000000 --- a/contents/bogo_sort/code/csharp/BogoSort.cs +++ /dev/null @@ -1,51 +0,0 @@ -// submitted by Julian Schacher (jspp) -using System; -using System.Collections.Generic; - -namespace BogoSort -{ - public static class BogoSort - { - public static List RunBogoSort(List list) where T : IComparable - { - while (!IsSorted(list)) - list = Shuffle(list, new Random()); - - return list; - } - - private static bool IsSorted(List list) where T : IComparable - { - var sorted = true; - - for (int i = 0; i < list.Count - 1; i++) - { - if (!(0 >= list[i].CompareTo(list[i + 1]))) - sorted = false; - } - if (!sorted) - { - sorted = true; - for (int i = 0; i < list.Count - 1; i++) - { - if (!(0 <= list[i].CompareTo(list[i + 1]))) - sorted = false; - } - } - - return sorted; - } - - private static List Shuffle(List list, Random random) - { - for (int 
i = list.Count - 1; i > 0; i--) - { - var j = random.Next(0, i); - var temp = list[i]; - list[i] = list[j]; - list[j] = temp; - } - return list; - } - } -} diff --git a/contents/bogo_sort/code/csharp/Program.cs b/contents/bogo_sort/code/csharp/Program.cs deleted file mode 100644 index 66cc135e6..000000000 --- a/contents/bogo_sort/code/csharp/Program.cs +++ /dev/null @@ -1,24 +0,0 @@ -// submitted by Julian Schacher (jspp) -using System; -using System.Collections.Generic; - -namespace BogoSort -{ - class Program - { - static void Main(string[] args) - { - Console.WriteLine("BogoSort"); - var listBogo = new List() { 1, 2, 6, 4, 9, 54, 3, 2, 7, 15 }; - Console.Write("unsorted: "); - foreach (var number in listBogo) - Console.Write(number + " "); - Console.WriteLine(); - listBogo = BogoSort.RunBogoSort(listBogo); - Console.Write("sorted: "); - foreach (var number in listBogo) - Console.Write(number + " "); - Console.WriteLine(); - } - } -} diff --git a/contents/bogo_sort/code/emojicode/bogo_sort.emojic b/contents/bogo_sort/code/emojicode/bogo_sort.emojic deleted file mode 100644 index ce95e1b84..000000000 --- a/contents/bogo_sort/code/emojicode/bogo_sort.emojic +++ /dev/null @@ -1,32 +0,0 @@ -🐇 🥇 🍇 - 🐇 ❗️ 🔀 numbers 🍨🐚💯🍆 🍇 - 🔁 ❎ 📶🐇🥇 numbers❗️❗️ 🍇 - 🐹 numbers❗️ - 🍉 - 🍉 - - 🐇 ❗️ 📶 numbers 🍨🐚💯🍆 ➡️ 👌 🍇 - 🔂 i 🆕⏩⏩️ 1 🐔 numbers❗️❗️ 🍇 - ↪️ 🐽 numbers i ➖ 1❗️ ▶️ 🐽 numbers i❗️ 🍇 - ↩️ 👎 - 🍉 - 🍉 - ↩️ 👍 - 🍉 -🍉 - -🏁 🍇 - 🍨 1.7 -3.0 2.5 2.0 7.0 1.5 -4.3 0.3 🍆 ➡️ numbers - - 😀 🔤unordered:🔤❗️ - 🔂 number numbers 🍇 - 😀 🔡 number 10❗️❗️ - 🍉 - - 🔀🐇🥇 numbers❗️ - - 😀 🔤ordered:🔤❗️ - 🔂 number numbers 🍇 - 😀 🔡 number 10❗️❗️ - 🍉 -🍉 diff --git a/contents/bogo_sort/code/factor/bogo_sort.factor b/contents/bogo_sort/code/factor/bogo_sort.factor deleted file mode 100644 index 44515b56a..000000000 --- a/contents/bogo_sort/code/factor/bogo_sort.factor +++ /dev/null @@ -1,25 +0,0 @@ -! There's no built-in "is sorted" function, so let's make one: -USING: locals ; -: sorted? ( seq -- ? ) - 2 clump ! 
split it up into overlapping pairs - ! so now, for example, { 1 2 3 } has turned into { { 1 2 } { 2 3 } } - ! and now we make sure that for every pair, the latter is >= the former - [| pair | pair first pair last <= ] all? -; - -USING: random ; -: bogosort ( seq -- seq' ) - ! `dup` duplicates the array, because `sorted?` pops its reference to it - ! randomize works in-place - ! so we `randomize` `until` it's `sorted?` - [ dup sorted? ] [ randomize ] until -; - -! WARNING: Increasing this number beyond 5 or so will make this take a very long time. -! That said, if you have an afternoon to kill... -5 >array randomize ! generate a random array to demo -dup . ! show the array -bogosort ! bogosort it -. ! show it again - - diff --git a/contents/bogo_sort/code/fortran/bogo.f90 b/contents/bogo_sort/code/fortran/bogo.f90 deleted file mode 100644 index 87a7ab9ff..000000000 --- a/contents/bogo_sort/code/fortran/bogo.f90 +++ /dev/null @@ -1,48 +0,0 @@ -PROGRAM bogo - IMPLICIT NONE - REAL(8), DIMENSION(5) :: array - - array = (/ 1d0, 1d0, 0d0, 3d0, 7d0 /) - - CALL bogo_sort(array) - - WRITE(*,*) array - -contaINs - - LOGICAL FUNCTION is_sorted(array) - REAL(8), DIMENSION(:), INTENT(IN) :: array - INTEGER :: i - - DO i = 1, SIZE(array) - IF (array(i+1) < array(i)) THEN - is_sorted = .FALSE. - END IF - END DO - END FUNCTION is_sorted - - SUBROUTINE bogo_sort(array) - REAL(8), DIMENSION(:), INTENT(INOUT) :: array - - DO WHILE (is_sorted(array) .EQV. .FALSE.) 
- - CALL shuffle(array) - - END DO - END SUBROUTINE bogo_sort - - SUBROUTINE shuffle(array) - REAL(8), DIMENSION(:), INTENT(INOUT) :: array - INTEGER :: i, randpos - REAL(8) :: r, temp - - DO i = size(array), 2, -1 - CALL RANDOM_NUMBER(r) - randpos = INT(r * i) + 1 - temp = array(randpos) - array(randpos) = array(i) - array(i) = temp - END DO - - END SUBROUTINE shuffle -END PROGRAM bogo diff --git a/contents/bogo_sort/code/go/bogo_sort.go b/contents/bogo_sort/code/go/bogo_sort.go deleted file mode 100644 index a8297fc50..000000000 --- a/contents/bogo_sort/code/go/bogo_sort.go +++ /dev/null @@ -1,38 +0,0 @@ -// Submitted by Christopher Milan (christopherm99) - -package main - -import ( - "fmt" - "math/rand" - "time" -) - -func shuffle(a *[]int) { - for i := len(*a) - 1; i > 0; i-- { - j := rand.Intn(i + 1) - (*a)[i], (*a)[j] = (*a)[j], (*a)[i] - } -} - -func isSorted(a []int) bool { - for i := 0; i < len(a)-1; i++ { - if a[i+1] < a[i] { - return false - } - } - return true -} - -func bogoSort(a *[]int) { - for !isSorted(*a) { - shuffle(a) - } -} - -func main() { - rand.Seed(time.Now().UnixNano()) - a := []int{1, 3, 4, 2} - bogoSort(&a) - fmt.Println(a) -} diff --git a/contents/bogo_sort/code/haskell/bogoSort.hs b/contents/bogo_sort/code/haskell/bogoSort.hs deleted file mode 100644 index 328e192cc..000000000 --- a/contents/bogo_sort/code/haskell/bogoSort.hs +++ /dev/null @@ -1,24 +0,0 @@ -import System.Random -import qualified Data.Map as M -import Data.Map ((!)) - -fisherYates :: RandomGen g => [a] -> g -> ([a], g) -fisherYates a gen = shuffle 1 gen m - where m = M.fromList $ zip [1..] a - shuffle i g k - | i == M.size m = (M.elems k, g) - | otherwise = let (j, g') = randomR (i, M.size m) g - k' = M.insert i (k!j) $ M.insert j (k!i) k - in shuffle (i+1) g' k' - -isSorted :: Ord a => [a] -> Bool -isSorted = all (uncurry (<=)) . (zip <*> tail) - -bogoSort :: (Ord a, RandomGen g) => g -> [a] -> [a] -bogoSort g a = fst $ head $ - filter (isSorted . 
fst) $ - iterate (uncurry fisherYates) (a, g) - -main = do - g <- newStdGen - print $ bogoSort g [9, 4, 3, 2, 5, 8, 6, 1, 7] diff --git a/contents/bogo_sort/code/java/Bogo.java b/contents/bogo_sort/code/java/Bogo.java deleted file mode 100644 index 556543641..000000000 --- a/contents/bogo_sort/code/java/Bogo.java +++ /dev/null @@ -1,46 +0,0 @@ -public class Bogo { - static void bogoSort(int[] arr) { - while(!isSorted(arr)) { - shuffle(arr); - } - } - - static boolean isSorted(int[] arr) { - for (int i = 0; i < arr.length - 1; i++) { - if(arr[i] > arr[i + 1]) { - return false; - } - } - - return true; - } - - static void shuffle(int[] arr) { - for (int r = arr.length - 1; r > 0; r--) { - int i = (int) Math.floor(Math.random() * r); - int tmp = arr[i]; - arr[i] = arr[r]; - arr[r] = tmp; - } - } - - - public static void main(String[] args) { - int[] test = new int[]{20, -3, 50, 1, -6, 59}; - - System.out.println("Unsorted array :"); - for (int i = 0; i < test.length; i++) { - System.out.print(test[i] + " "); - } - - - bogoSort(test); - - - System.out.println("\n\nSorted array :"); - for (int i = 0; i < test.length; i++) { - System.out.print(test[i] + " "); - } - System.out.println(""); - } -} diff --git a/contents/bogo_sort/code/javascript/bogo.js b/contents/bogo_sort/code/javascript/bogo.js deleted file mode 100644 index 2559632e8..000000000 --- a/contents/bogo_sort/code/javascript/bogo.js +++ /dev/null @@ -1,32 +0,0 @@ -function isSorted(arr) { - for (let i = 0; i < arr.length - 1; i++) { - if (arr[i] > arr[i + 1]) { - return false; - } - } - - return true; -} - -function bogoSort(arr) { - while (!isSorted(arr)) { - shuffle(arr); - } -} - -function shuffle(arr) { - for (let r = arr.length -1; r > 0; r--) { - let i = Math.floor(Math.random() * r); - let tmp = arr[i]; - arr[i] = arr[r]; - arr[r] = tmp; - } -} - -function main() { - const testArray = [4, 5, 123, 24, 34, -5]; - bogoSort(testArray); - console.log(testArray); -} - -main(); diff --git 
a/contents/bogo_sort/code/julia/bogo.jl b/contents/bogo_sort/code/julia/bogo.jl deleted file mode 100644 index d07cdb6a7..000000000 --- a/contents/bogo_sort/code/julia/bogo.jl +++ /dev/null @@ -1,24 +0,0 @@ -using Random - -function is_sorted(a::Vector{Float64}) - for i = 1:length(a)-1 - if (a[i] > a[i + 1]) - return false - end - end - return true -end - -function bogo_sort!(a::Vector{Float64}) - while(!is_sorted(a)) - shuffle!(a) - end -end - -function main() - a = [1.0, 3.0, 2.0, 4.0] - bogo_sort!(a) - println(a) -end - -main() diff --git a/contents/bogo_sort/code/lua/bogosort.lua b/contents/bogo_sort/code/lua/bogosort.lua deleted file mode 100644 index f66753535..000000000 --- a/contents/bogo_sort/code/lua/bogosort.lua +++ /dev/null @@ -1,28 +0,0 @@ -local function shuffle(arr) - for i = 1, #arr-1 do - local rand = math.random(i,#arr) - arr[i], arr[rand] = arr[rand], arr[i] - end -end - -local function issorted(arr) - for i = 1,#arr-1 do - if arr[i] > arr[i+1] then - return false - end - end - return true -end - -function bogosort(arr) - while not issorted(arr) do - shuffle(arr) - end -end - -local arr = {1, 45, 756, 4569, 56, 3, 8, 5, -10, -4} -print(("Unsorted array: {%s}"):format(table.concat(arr,", "))) - -bogosort(arr) - -print(("Sorted array: {%s}"):format(table.concat(arr,", "))) diff --git a/contents/bogo_sort/code/matlab/bogosort.m b/contents/bogo_sort/code/matlab/bogosort.m deleted file mode 100644 index eeff29adb..000000000 --- a/contents/bogo_sort/code/matlab/bogosort.m +++ /dev/null @@ -1,30 +0,0 @@ -function main() - array = floor( rand(1,7)*100 ); - disp('Before Sorting:') - disp(array) - - array = bogo_sort(array); - disp('After Sorting') - disp(array) -end - -function retval = is_sorted(array) - for i=1:length(array)-1 - if array(i) > array(i+1) - retval = false; - return - end - end - retval = true; -end - -function sorted_array = bogo_sort(array) - while ~is_sorted(array) - % create a list of random permutation indices - i = 
randperm(length(array)); - array = array(i); - end - sorted_array = array; -end - - diff --git a/contents/bogo_sort/code/nim/bogo_sort.nim b/contents/bogo_sort/code/nim/bogo_sort.nim deleted file mode 100644 index aca98f749..000000000 --- a/contents/bogo_sort/code/nim/bogo_sort.nim +++ /dev/null @@ -1,26 +0,0 @@ -import random - -randomize() - -proc print_array(a: openArray[int]) = - for n in 0 .. len(a)-1: - echo a[n] - -proc is_sorted(a: openArray[int]): bool = - for n in 1 .. len(a)-1: - if a[n] > a[n-1]: - return false - - return true - -proc bogo_sort(a: var openArray[int]) = - while not is_sorted(a): - shuffle(a) - - -var x: array[10,int] = [32,32,64,16,128,8,256,4,512,2] - -print_array(x) -bogo_sort(x) -echo "\n" -print_array(x) diff --git a/contents/bogo_sort/code/php/bogo_sort.php b/contents/bogo_sort/code/php/bogo_sort.php deleted file mode 100644 index 41ceb5b99..000000000 --- a/contents/bogo_sort/code/php/bogo_sort.php +++ /dev/null @@ -1,31 +0,0 @@ - $array[$i + 1]) { - return false; - } - } - - return true; -} - -function bogo_sort(array $array): array -{ - while (!is_sorted($array)) { - shuffle($array); - } - - return $array; -} - - -$unsorted = [10, 7, 3, 1, 4, 8, 5, 6, 9, 2]; -$bogo_sorted = bogo_sort($unsorted); - -printf('Unsorted: %s', implode(',', $unsorted)); -echo PHP_EOL; -printf('Sorted: %s', implode(',', $bogo_sorted)); -echo PHP_EOL; diff --git a/contents/bogo_sort/code/python/bogo.py b/contents/bogo_sort/code/python/bogo.py deleted file mode 100644 index 995a0ce8c..000000000 --- a/contents/bogo_sort/code/python/bogo.py +++ /dev/null @@ -1,20 +0,0 @@ -import random - - -def is_sorted(a): - for i in range(len(a)-1): - if a[i+1] < a[i]: - return False - return True - -def bogo_sort(a): - while not is_sorted(a): - random.shuffle(a) - -def main(): - a = [1, 3, 2, 4] - bogo_sort(a) - print(a) - -main() - diff --git a/contents/bogo_sort/code/r/bogo_sort.r b/contents/bogo_sort/code/r/bogo_sort.r deleted file mode 100755 index 
0d354de27..000000000 --- a/contents/bogo_sort/code/r/bogo_sort.r +++ /dev/null @@ -1,15 +0,0 @@ -bogo_sort <- function(a) { - while(is.unsorted(a)) { - a <- sample(a) - } - return(a) -} - -test <- c(20, -3, 50, 1, -6, 59) - -print("unsorted list") -print(test) - -print("sorted list") -print(bogo_sort(test)) - diff --git a/contents/bogo_sort/code/racket/bogo_sort.rkt b/contents/bogo_sort/code/racket/bogo_sort.rkt deleted file mode 100755 index dfc3b1c70..000000000 --- a/contents/bogo_sort/code/racket/bogo_sort.rkt +++ /dev/null @@ -1,24 +0,0 @@ -#lang racket - -(define (bogo_sort l) - (if (is_sorted? l) - l - (bogo_sort (shuffle l)) - ) - ) - -(define (is_sorted? l) - (if (> (length l) 1) - (if (> (first l) (second l)) - false - (is_sorted? (rest l)) - ) - true - ) - ) - -(define unsorted_list '(20 -3 50 1 -6 59)) -(display "unsorted list: ") -(displayln unsorted_list) -(display "sorted list: ") -(displayln (bogo_sort unsorted_list)) diff --git a/contents/bogo_sort/code/ruby/bogo.rb b/contents/bogo_sort/code/ruby/bogo.rb deleted file mode 100644 index 13e4ae447..000000000 --- a/contents/bogo_sort/code/ruby/bogo.rb +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env ruby - -def is_sorted(a) - a.each_cons(2).all? { |(l, r)| l <= r } -end - -def bogo_sort(a) - a.shuffle! 
until is_sorted a -end - -a = [1, 1, 0, 3, 7] - -puts "Unsorted" -p a - -bogo_sort a - -puts "Sorted" -p a diff --git a/contents/bogo_sort/code/rust/bogosort.rs b/contents/bogo_sort/code/rust/bogosort.rs deleted file mode 100644 index 78c12323f..000000000 --- a/contents/bogo_sort/code/rust/bogosort.rs +++ /dev/null @@ -1,28 +0,0 @@ -// Submitted by jess 3jane - -extern crate rand; - -use rand::{thread_rng, Rng}; - -fn is_sorted(arr: &[i32]) -> bool { - for i in 1..arr.len() { - if arr[i - 1] > arr[i] { - return false; - } - } - true -} - -fn bogo_sort(arr: &mut [i32]) { - while !is_sorted(arr) { - thread_rng().shuffle(arr); - } -} - -fn main() { - let mut v = vec![1, 2, 3, 4, 5]; - thread_rng().shuffle(&mut v); - println!("Original array: {:?}", v); - bogo_sort(&mut v); - println!("Sorted array: {:?}", v); -} diff --git a/contents/bogo_sort/code/scala/bogo.scala b/contents/bogo_sort/code/scala/bogo.scala deleted file mode 100644 index 25bd9d729..000000000 --- a/contents/bogo_sort/code/scala/bogo.scala +++ /dev/null @@ -1,25 +0,0 @@ -import scala.util.Random.shuffle - -object BogoSort { - - def isSorted(list: List[Int]): Boolean = - list match { - case Nil => true - case a :: b :: _ if a > b => false - case _ :: tail => isSorted(tail) - } - - def bogoSort(list: List[Int]): List[Int] = - isSorted(list) match { - case false => bogoSort(shuffle(list)) - case _ => list - } - - def main(args: Array[String]): Unit = { - val unsorted = List(5, 2, 7, 1, -5) - - println("Unsorted list is " + unsorted) - println(" Sorted list is " + bogoSort(unsorted)) - } - -} diff --git a/contents/bogo_sort/code/smalltalk/bogosort.st b/contents/bogo_sort/code/smalltalk/bogosort.st deleted file mode 100644 index 73d0ced6d..000000000 --- a/contents/bogo_sort/code/smalltalk/bogosort.st +++ /dev/null @@ -1,9 +0,0 @@ -"Add this to the SequenceableCollection: " -SequenceableCollection>>bogoSort - "A simple bogosort." - [ self isSorted ] whileFalse: [ - self shuffle. 
- ] - -"Then you can run this anywhere: " -#(4 3 2 1 6 5) bogoSort "#(1 2 3 4 5 6)" diff --git a/contents/bogo_sort/code/swift/bogosort.swift b/contents/bogo_sort/code/swift/bogosort.swift deleted file mode 100644 index df340778e..000000000 --- a/contents/bogo_sort/code/swift/bogosort.swift +++ /dev/null @@ -1,26 +0,0 @@ -import Foundation - -func isSorted(inputArray: [Int]) -> Bool { - for i in 0.. inputArray[i+1] { - return false - } - } - - return true -} - -func bogoSort(sortArray: inout [Int]) -> [Int] { - while(!isSorted(inputArray: sortArray)) { - sortArray.shuffle() - } - - return sortArray -} - -func main() { - var testArray = [4,5,123,24,34,-5] - print(bogoSort(sortArray: &testArray)) -} - -main() diff --git a/contents/box_muller/box_muller.md b/contents/box_muller/box_muller.md new file mode 100644 index 000000000..2c7ce34da --- /dev/null +++ b/contents/box_muller/box_muller.md @@ -0,0 +1,240 @@ +# The Box—Muller Transform + +The Box—Muller transform holds a special place in my heart as it was the first method I ever had to implement for my own research. + +The purpose of this transformation is simple. +It takes a uniform (probably random) distribution and turns it into a Gaussian one. + +

+ +

+ +That's it. + +It was originally developed by George Box (yes, Box is his last name) and Mervin Muller in 1958 and is one of the most common methods to create a random, Gaussian distribution of points {{ "box1958" | cite }}. +It's particularly useful when initializing a set of particles for a physical, N-body simulation. +This chapter will be divided into a few subsections: + +1. How to initialize the Box—Muller transform +2. How to use the Box—Muller transform in Cartesian coordinates {{ "box_muller_wiki" | cite }}. +3. How to use the Box—Muller transform in Polar Coordinates, also called the Marsaglia transform {{ "marsaglia_wiki" | cite }}. + +Of course, there will be full code examples at the bottom. +So, let's get to it! + +## How to initialize the Box—Muller transform + +The main thing to mention here is that the Box—Muller transform requires some form of uniform distribution as its input. +One obvious way to initialize a random distribution of points is to start with a grid, like so: + +{% method %} +{% sample lang="jl" %} +[import:3-32, lang:"julia"](code/julia/box_muller.jl) +{% endmethod %} + +This will create the following set of points for $$n=100$$: + +

+ +

+ +To be honest, there are a bunch of ways to generate this exact same distribution. +Here, we simply walked backwards half of the grid size, determined the step size, and then placed a particle at each step. +Note that there is an inherent limitation with this method in that it only works for a square numbers. +Because of this, we decided to round $$n$$ up to the nearest square to make a nice grid. +It's not the cleanest implementation, but the grid will mainly be used for debugging anyway, so it's OK to be a *little* messy here. + +The real star of the show is the uniform random distribution, which can be generated like this: + +{% method %} +{% sample lang="jl" %} +[import:34-37, lang:"julia"](code/julia/box_muller.jl) +{% endmethod %} + +This will create the following set of points for $$n=100$$: + +

+ +

+ +OK, but how do we know this is uniform? +Good question! + +The easiest way is to plot a histogram of a super large number of points. +If the random distribution is uniform, then all the bins should be roughly the same value. +The more points we have, the smaller the percent difference between the bins will be. +Here is a set of images generated by `rand()` for $$n=100$$, $$1,000$$, and $$10,000$$ all in one dimension: + + +| $$100$$ | $$1,000$$ | $$10,000$$ | +|---------|-----------|------------| +|![100 points](res/rand100.png)|![1000 points](res/rand1000.png)|![10000 points](res/rand10000.png)| + +It is clear that the $$10,000$$ case looks the most uniform. +Note that for two dimensions, the same logic applies, but we need to create separate histograms for the $$x$$ and $$y$$ coordinates. + +Once this test is complete, we can be fairly sure that the function we are using to generate the initial distribution is uniform and ready for the next step of the process: actually using the Box—Muller transform! + +## How to use the Box—Muller transform in Cartesian coordinates + +The two dimensional Cartesian version of the Box—Muller transform starts with two random input values ($$u_1$$ and $$u_2$$), both of which come from their own uniform distributions that are between $$0$$ and $$1$$. +It then creates two output points ($$z_1$$ and $$z_2$$). +For this, $$u_1$$ is used to create a Gaussian distribution along some radial value $$r$$, and $$u_2$$ is used to spin that around a circle with some angular component $$\theta$$, such that + +$$ +\begin{align} +r &= \sqrt{-2\ln(u_1)} \\ +\theta &= 2\pi u_2. +\end{align} +$$ + +Looking at these equations, $$\theta$$ seems to make a decent amount of sense. +After all, angles typically vary from $$0 \rightarrow 2\pi$$, and our input distribution varies from $$0 \rightarrow 1$$, so we can get some value between $$0$$ and $$2\pi$$ by multiplying $$2\pi$$ by one of our input values. + +So what about $$r$$? 
+Well, remember that if we want $$u$$ to be in a Gaussian form, then we might say something like, $$u = e^{-\frac{r^2}{2}}$$, so if we solve this for $$r$$, we get $$r=\sqrt{-2\ln(u)}$$. + +From these values, we can calculate our new $$x,y$$ points as, + +$$ +\begin{align} +x &= r\cos(\theta) \\ +y &= r\sin(\theta). +\end{align} +$$ + +Finally, in order to specify the size and shape of the generated Gaussian distribution, we can use the standard deviation, $$\sigma$$, and the mean, $$\mu$$, like so: + +$$ +\begin{align} +z_1 &= x\sigma + \mu \\ +z_2 &= y\sigma + \mu. +\end{align} +$$ + +In general, this can be written in code like so: + +{% method %} +{% sample lang="jl" %} +[import:41-49, lang:"julia"](code/julia/box_muller.jl) +{% endmethod %} + +Which produces the following output + +

+ + +

+ +Note that we have written the code to work on a single set of input values, but it could also be written to read in the entire distribution of points all at once. +As this particular technique is usually implemented in parallel, it's up to you to decided which is the fastest for your own individual use-case. + +At this stage, we have a good idea of how the transform works, but some people shy away from the Cartesian method in practice and instead opt for the polar form, which will be discussed next! + +## How to use the Box—Muller transform in polar coordinates + +The Cartesian form of the Box—Muller transform is relatively intuitive. +The polar method is essentially the same, but without the costly $$\sin$$ and $$\cos$$ operations. +In this case, we use the input values to create an initial radial point (to be scaled later): + +$$ +r_0 = \sqrt{u_1^2 + u_2^2}. +$$ + +This means that we are essentially trying to transform our set of $$u$$ values into a new input value $$r_0$$. +To do this, we need to start with a uniformly distributed *circle*, so we must reject any values for $$u_1$$ and $$u_2$$ where $$r$$ is either $$0$$ or $$\gt 1$$. +This also means that the initial distributions of $$u_1$$ and $$u_2$$ must range from $$-1 \rightarrow +1$$. + +From here, we can use basic trigonometric identities to redefine the $$\sin$$ and $$\cos$$ to be + +$$ +\begin{align} +\cos(\theta) &= u_1/\sqrt{r_0} \\ +\sin(\theta) &= u_2/\sqrt{r_0}. +\end{align} +$$ + +This changes the output equations to be + +$$ +\begin{align} +x &= r\cos(\theta) = \sqrt{-2\ln(r_0)}\left(\frac{u_1}{\sqrt{r_0}}\right) = u_1 \sqrt{\frac{-2\ln(r_0)}{r_0}} \\ +y &= r\sin(\theta) = \sqrt{-2\ln(r_0)}\left(\frac{u_2}{\sqrt{r_0}}\right) = u_2 \sqrt{\frac{-2\ln(r_0)}{r_0}}. +\end{align} +$$ + +Again, the final values are: + +$$ +\begin{align} +z_1 &= \sigma x + \mu \\ +z_2 &= \sigma y + \mu. 
+\end{align} +$$ + +In code, it might look like this: + +{% method %} +{% sample lang="jl" %} +[import:52-63, lang:"julia"](code/julia/box_muller.jl) +{% endmethod %} + +This will produce the following output: + +

+ + +

+ +Again, this is ultimately the same as the Cartesian method, except that it: +1. Rejects points in the initial distribution that are outside of the unit circle (also called rejection sampling) +2. Avoids costly $$\sin$$ and $$\cos$$ operations + +Point 2 means that the polar method *should be* way faster than the Cartesian one, but rejection sampling is somewhat interesting in it's own right, which we have discussed in a [separate chapter](box_muller_rejection.md) + +## Example Code + +The example code here is straightforward: we start with a uniform distribution of points (both on a grid and a uniform random distribution) and then we preform the Box—Muller transform to see how far off it is from the Gaussian we expect. + +{% method %} +{% sample lang="jl" %} +[import, lang:"julia"](code/julia/box_muller.jl) +{% endmethod %} + +### Bibliography + +{% references %} {% endreferences %} + + + +## License + +##### Code Examples + +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). + +##### Text + +The text of this chapter was written by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +[

](https://creativecommons.org/licenses/by-sa/4.0/) + +#### Images/Graphics + +- The image "[IFS triangle 1](../IFS/res/IFS_triangle_1.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[IFS square 3](../IFS/res/IFS_square_3.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Simple Barnsley fern](res/full_fern.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Affine random transform 0](res/affine_rnd_0.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Affine random transform 1](res/affine_rnd_1.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Affine random transform 2](res/affine_rnd_2.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Affine random transform 3](res/affine_rnd_3.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). 
+- The video "[Affine fern transform 0](res/affine_fern_0.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Affine fern transform 1](res/affine_fern_1.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Affine fern transform 2](res/affine_fern_2.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Affine fern transform 3](res/affine_fern_3.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Fern twiddle 0](res/fern_twiddle_0.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Fern twiddle 1](res/fern_twiddle_1.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Fern twiddle 2](res/fern_twiddle_2.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). 
+- The video "[Fern twiddle 3](res/fern_twiddle_3.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). diff --git a/contents/box_muller/box_muller_rejection.md b/contents/box_muller/box_muller_rejection.md new file mode 100644 index 000000000..37e1c0e3d --- /dev/null +++ b/contents/box_muller/box_muller_rejection.md @@ -0,0 +1,126 @@ +# Just how costly is rejection sampling anyway? + +Let's imagine we want to have a final Gaussian distribution with $$n$$ particles in it. +With the Cartesian Box—Muller method, this is easy: start the initial distribution(s) with $$n$$ particles and then do the transform. +Things *can* be just as easy with the Polar Box—Muller method as well, so long as we start with a uniformly distributed *circle* instead of a uniformly distributed *square*. +That is to say, so long as we do the rejection sampling before-hand, the Polar Box—Muller method will always be more efficient. +To be fair, there are methods to generate a uniform distribution of points within a circle without rejection sampling, but let's assume that we require rejection sampling for this example + +This means that someone somehow needs to do the rejection sampling for the Polar method, which is sometimes a painful process. +This also means that the Box—Muller method can be used to teach some of the fundamentals of General-Purpose GPU computing. +Note that because of the specificity of this problem, all the code in this subsection will be in Julia and using the package KernelAbstractions.jl, which allows us to execute the same kernels on either CPU or GPU hardware depending on how we configure things. + +Let's first consider the case where we do the rejection sampling as a part of the polar Box—Muller kernel instead of as a pre-processing step. 
+In this case, we can imagine 2 separate ways of writing our kernel: +1. With replacement: In this case, we *absolutely require* the final number of points in our Gaussian distribution to be $$n$$, so if we find a point outside of the unit circle while running the kernel, we will "re-roll" again for a new point that *is* within the circle. +2. Without replacement: This means that we will start with a uniform distribution of $$n$$ points, but end with a Gaussian of $$m < n$$ points. In this case, if we find a point outside of the unit circle while running the kernel, we just ignore it by setting the output values to NaNs (or something similar). + +OK, so first with replacement: + +[import:70-84, lang:"julia"](code/julia/performance.jl) + +This is an awful idea for a number of reasons. +Here are a few: +1. If we find a point outside of the unit circle, we have to continually look for new points until we *do* find one inside of the circle. Because we are running this program in parallel, where each thread transforms one point at a time, some threads might take literally forever to find a new point (if we are really unlucky). +2. To generate new points, we need to re-generate a uniform distribution, but what if our uniform distribution is not random? What if it's a grid (or something similar) instead? In this case, we really shouldn't look for a new point on the inside of the circle as all those points have already been accounted for. +3. The `rand()` function is kinda tricky on some parallel platforms (like GPUs) and might not work out of the box. In fact, the implementation shown above can only be run on the CPU. + +OK, fine. +I don't think anyone expected a kernel with a `while` loop inside of it to be fast. +So what about a method without replacement? +Surely there is no problem if we just ignore the `while` loop altogether! 
+Well, the problem with this approach is a bit less straightforward, but first, code: + +[import:53-68, lang:"julia"](code/julia/performance.jl) + +To start discussing why a polar kernel without replacement is *also* a bad idea, let's go back to the [Monte Carlo chapter](../monte_carlo/monte_carlo.md), where we calculated the value of $$\pi$$ by embedding it into a circle. +There, we found the probability of a randomly chosen point falling within the unit circle to be $$\frac{\pi r^2}{(2r)^2} = \frac{\pi}{4} \sim 78.54\%$$, shown in the visual below: + +

+ +

+ +This means that a uniform distribution of points within a circle will reject $$\sim 21.46\%$$ of points on the square. +This also means that if we have a specific $$n$$ value we want for the final distribution, we will need $$\frac{1}{0.7853} \sim 1.273 \times$$ more input values on average! + +No problem! +In this hypothetical case, we don't need *exactly* $$n$$ points, so we can just start the initial distributions with $$1.273 \times n$$ points, right? + +Right. +That will work well on parallel CPU hardware, but on the GPU this will still have an issue. + +On the GPU, computation is all done in parallel, but there is a minimum unit of parallelism called a *warp*. +The warp is the smallest number of threads that can execute something in parallel and is usually about 32. +This means that if an operation is queued, all 32 threads will do it at the same time. +If 16 threads need to execute something and the other 16 threads need to execute something else, this will lead to *warp divergence* where 2 actions need to be performed instead of 1: + +

+ +

+ +In this image, every odd thread needs to perform the pink action, while the even threads need to perform the blue action. +This means that 2 separate parallel tasks will be performed, one for the even threads, another for the odd threads. +This means that if $$\ell$$ separate operations are queued, it could take $$\ell\times$$ as long for all the threads to do their work! +This is why `if` statements in a kernel can be dangerous! +If used improperly, they can cause certain threads in a warp to do different things! + +So let's imagine that the above image is part of a larger array of values, such that there are a bunch of warps with the same divergence issue. +In this case, we could sort the array before-hand so that all even elements come before all odd elements. +This would mean that the warps will almost certainly not diverge because the elements queued will all be of the same type and require the same operations. +Unfortunately, this comes at the cost of a sorting operation which is prohibitively expensive. + +If we look at the above kernel, we are essentially asking $$21.46\%$$ of our threads to do something different than everyone else, and because we are usually inputting a uniform random distribution, this means that *most* warps will have to queue up 2 parallel actions instead of 1. + +Essentially, we need to pick our poison: +* Slow $$\sin$$ and $$\cos$$ operations with the Cartesian method +* Warp divergence with the Polar method + +The only way to know which is better is to perform benchmarks, which we will show in a bit, but there is one final scenario we should consider: what about doing the rejection sampling as a pre-processing step? +This would mean that we pre-initialize the polar kernel with a uniform distribution of points in the unit circle. +This means no warp divergence, so we can get the best of both worlds, right? + +Well, not exactly. 
+The polar Box—Muller method will definitely be faster, but again: someone somewhere needed to do rejection sampling and if we include that step into the process, things become complicated again. +The truth is that this pre-processing step is difficult to get right, so it might require a chapter in its own right. + +In many cases, it's worth spending a little time before-hand to make sure subsequent operations are fast, but in this case, we only have a single operation, not a set of operations. +The Box—Muller method will usually only be used once at the start of the simulation, which means that the pre-processing step of rejection sampling might end up being overkill. + +No matter the case, benchmarks will show the true nature of what we are dealing with here: + +| Method                    | CPU                    | GPU                    | +| ------------------------- | ---------------------- | ---------------------- | +| Cartesian                 | $$385.819 \pm 1.9$$ms  | $$19.347 \pm 0.618$$ms | +| Polar without replacement | $$273.308 \pm 2.81$$ms | $$26.712 \pm 0.592$$ms | +| Polar with replacement    | $$433.644 \pm 2.64$$ms | NA                     | + +These were run with an Nvidia GTX 970 GPU and a Ryzen 3700X 16 core CPU. +For those interested, the code can be found below. +For these benchmarks, we used Julia's inbuilt benchmarking suite from `BenchmarkTools`, making sure to sync the GPU kernels with `CUDA.@sync`. +We also ran with $$4096^2$$ input points. + +Here, we see an interesting divergence in the results. +On the CPU, the polar method is *always* faster, but on the GPU, both methods are comparable. +I believe this is the most important lesson to be learned from the Box—Muller method: sometimes, no matter how hard you try to optimize your code, different hardware can provide radically different results! +It's incredibly important to benchmark code to make sure it actually is as performant as you think it is! 
+ +## Full Script + +[import, lang:"julia"](code/julia/performance.jl) + + + +## License + +##### Code Examples + +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). + +##### Text + +The text of this chapter was written by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +[

](https://creativecommons.org/licenses/by-sa/4.0/) + diff --git a/contents/box_muller/code/julia/box_muller.jl b/contents/box_muller/code/julia/box_muller.jl new file mode 100644 index 000000000..82f614bd6 --- /dev/null +++ b/contents/box_muller/code/julia/box_muller.jl @@ -0,0 +1,156 @@ +using DelimitedFiles, LinearAlgebra +using Test + +function create_grid(n, endpoints) + + grid_extents = endpoints[2] - endpoints[1] + + # number of points along any given axis + # For 2D, we take the sqrt(n) and then round up + axis_num = ceil(Int, sqrt(n)) + + # we are now rounding n up to the nearest square if it was not already one + if sqrt(n) != axis_num + n = axis_num^2 + end + + # Distance between each point + dx = grid_extents / (axis_num) + + # Initializing the array, particles along the column, dimensions along rows + a = zeros(n, 2) + + # This loops over the relevant dimensions + for i = 1:axis_num + for j = 1:axis_num + a[(i - 1) * axis_num + j, :] .= + [(i - 0.5) * dx + endpoints[1], + (j - 0.5) * dx + endpoints[1]] + end + end + + return a +end + +function create_rand_dist(n, endpoints) + grid_extents = endpoints[2] - endpoints[1] + return(rand(n,2) * grid_extents .+ endpoints[1]) +end + +# This function reads in a pair of input points and performs the Cartesian +# Box--Muller transform +function cartesian_box_muller(input_pts, sigma, mu) + r = sqrt(-2 * log(input_pts[1])) + theta = 2 * pi * input_pts[2] + + return [sigma * r * cos(theta) + mu[1], + sigma * r * sin(theta) + mu[2]] + +end + +# This function reads in a pair of input points and performs the Cartesian +# Box--Muller transform +function polar_box_muller(input_pts, sigma, mu) + r_0 = input_pts[1]^2 + input_pts[2]^2 + + # this method is only valid for points within the unit circle + if r_0 == 0 || r_0 > 1 + return [NaN, NaN] + end + + return [sigma * input_pts[1] * sqrt(-2 * log(r_0) / r_0) + mu[1], + sigma * input_pts[2] * sqrt(-2 * log(r_0) / r_0) + mu[2]] + +end + +function is_gaussian(input_pts; bounds 
= [-1 1; -1 1], dx = 0.1, + sigma = 1, mu = [0,0], threshold = 0.1) + histogram = zeros(ceil(Int,(bounds[1,2]-bounds[1,1])/dx), + ceil(Int,(bounds[2,2]-bounds[2,1])/dx)) + + for i = 1:size(input_pts)[1] + input_x = input_pts[i, 1] + input_y = input_pts[i, 2] + if !(isnan(input_x) || isnan(input_y)) + + bin = CartesianIndex(ceil(Int, (input_x - bounds[1,1]) / dx), + ceil(Int, (input_y - bounds[2,1]) / dx)) + + if bin[1] <= size(histogram)[1] && bin[1] > 0 && + bin[2] <= size(histogram)[2] && bin[2] > 0 + histogram[bin] += 1 + end + end + end + + n = sum(histogram) + normalize!(histogram) + + rms = 0 + for i = 1:size(histogram)[1] + x = bounds[1,1] + i*dx + for j = 1:size(histogram)[2] + y = bounds[2,1] + j*dx + gaussian_value = exp(-(((x+mu[1])^2)/(2*sigma^2) + + ((y+mu[2])^2)/(2*sigma^2))) + rms += (gaussian_value - histogram[i,j])^2 + end + end + + return sqrt(rms/n) < threshold +end + +function main(n) + + # This casts the input onto the nearest square for the cartesian grids + n = Int(ceil(sqrt(n))^2) + + cartesian_grid = create_grid(n, [0,1]) + polar_grid = create_grid(n, [-1,1]) + cartesian_rand = create_rand_dist(n, [0,1]) + polar_rand = create_rand_dist(n, [-1,1]) + + cartesian_grid_output = similar(cartesian_grid) + polar_grid_output = similar(polar_grid) + cartesian_rand_output = similar(cartesian_rand) + polar_rand_output = similar(polar_rand) + + # going through each pair of points and using the x,y coordinates in + # their respective functions + for i = 1:size(cartesian_grid)[1] + cartesian_grid_output[i,:] .= + cartesian_box_muller(cartesian_grid[i,:], 1, [0,0]) + + polar_grid_output[i,:] .= polar_box_muller(polar_grid[i,:], 1, [0,0]) + + cartesian_rand_output[i,:] .= + cartesian_box_muller(cartesian_rand[i,:], 1, [0,0]) + + polar_rand_output[i,:] .= polar_box_muller(polar_rand[i,:], 1, [0,0]) + end + + @testset "histogram tests of Box--Muller Gaussianness" begin + @test is_gaussian(cartesian_grid_output; + bounds = [-3 3; -3 3], dx = 0.3, + sigma = 1, 
mu = [0,0]) + @test is_gaussian(cartesian_rand_output; + bounds = [-3 3; -3 3], dx = 0.3, + sigma = 1, mu = [0,0]) + @test is_gaussian(polar_grid_output; + bounds = [-3 3; -3 3], dx = 0.3, + sigma = 1, mu = [0,0]) + @test is_gaussian(polar_rand_output; + bounds = [-3 3; -3 3], dx = 0.3, + sigma = 1, mu = [0,0]) + end + + writedlm("cartesian_grid_output.dat", cartesian_grid_output) + writedlm("polar_grid_output.dat", polar_grid_output) + writedlm("cartesian_rand_output.dat", cartesian_rand_output) + writedlm("polar_rand_output.dat", polar_rand_output) + + writedlm("cartesian_grid.dat", cartesian_grid) + writedlm("polar_grid.dat", polar_grid) + writedlm("cartesian_rand.dat", cartesian_rand) + writedlm("polar_rand.dat", polar_rand) +end diff --git a/contents/box_muller/code/julia/performance.jl b/contents/box_muller/code/julia/performance.jl new file mode 100644 index 000000000..d8732df33 --- /dev/null +++ b/contents/box_muller/code/julia/performance.jl @@ -0,0 +1,142 @@ +using KernelAbstractions +using CUDA + +if has_cuda_gpu() + using CUDAKernels +end + +function create_grid(n, endpoints; AT = Array) + + grid_extents = endpoints[2] - endpoints[1] + + # number of points along any given axis + # For 2D, we take the sqrt(n) and then round up + axis_num = ceil(Int, sqrt(n)) + + # we are now rounding n up to the nearest square if it was not already one + if sqrt(n) != axis_num + n = axis_num^2 + end + + # Distance between each point + dx = grid_extents / (axis_num) + + # This is warning in the case that we do not have a square number + if sqrt(n) != axis_num + println("Cannot evenly divide ", n, " into 2 dimensions!") + end + + # Initializing the array, particles along the column, dimensions along rows + a = AT(zeros(n, 2)) + + # This works by firxt generating an N dimensional tuple with the number + # of particles to be places along each dimension ((10,10) for 2D and n=100) + # Then we generate the list of all CartesianIndices and cast that onto a + # grid by 
multiplying by dx and subtracting grid_extents/2 + for i = 1:axis_num + for j = 1:axis_num + a[(i - 1) * axis_num + j, 1] = i * dx + endpoints[1] + a[(i - 1) * axis_num + j, 2] = j * dx + endpoints[1] + end + end + + return a +end + +function create_rand_dist(n, endpoints; AT = Array) + grid_extents = endpoints[2] - endpoints[1] + return(AT(rand(n,2)) * grid_extents .+ endpoints[1]) +end + +# This function reads in a pair of input points and performs the Cartesian +# Box--Muller transform +@kernel function polar_muller_noreplacement!(input_pts, output_pts, sigma, mu) + tid = @index(Global, Linear) + @inbounds r_0 = input_pts[tid, 1]^2 + input_pts[tid, 2]^2 + + # this method is only valid for points within the unit circle + if r_0 == 0 || r_0 > 1 + @inbounds output_pts[tid,1] = NaN + @inbounds output_pts[tid,2] = NaN + else + @inbounds output_pts[tid,1] = sigma * input_pts[tid,1] * + sqrt(-2 * log(r_0) / r_0) + mu + @inbounds output_pts[tid,2] = sigma * input_pts[tid, 2] * + sqrt(-2 * log(r_0) / r_0) + mu + end + +end + +@kernel function polar_muller_replacement!(input_pts, output_pts, sigma, mu) + tid = @index(Global, Linear) + @inbounds r_0 = input_pts[tid, 1]^2 + input_pts[tid, 2]^2 + + while r_0 > 1 || r_0 == 0 + p1 = rand()*2-1 + p2 = rand()*2-1 + r_0 = p1^2 + p2^2 + end + + @inbounds output_pts[tid,1] = sigma * input_pts[tid,1] * + sqrt(-2 * log(r_0) / r_0) + mu + @inbounds output_pts[tid,2] = sigma * input_pts[tid, 2] * + sqrt(-2 * log(r_0) / r_0) + mu +end + + +function polar_box_muller!(input_pts, output_pts, sigma, mu; + numthreads = 256, numcores = 4, + f = polar_muller_noreplacement!) + if isa(input_pts, Array) + kernel! = f(CPU(), numcores) + else + kernel! 
= f(CUDADevice(), numthreads) + end + kernel!(input_pts, output_pts, sigma, mu, ndrange=size(input_pts)[1]) +end + + +@kernel function cartesian_kernel!(input_pts, output_pts, sigma, mu) + tid = @index(Global, Linear) + + @inbounds r = sqrt(-2 * log(input_pts[tid,1])) + @inbounds theta = 2 * pi * input_pts[tid, 2] + + @inbounds output_pts[tid,1] = sigma * r * cos(theta) + mu + @inbounds output_pts[tid,2] = sigma * r * sin(theta) + mu +end + +function cartesian_box_muller!(input_pts, output_pts, sigma, mu; + numthreads = 256, numcores = 4) + if isa(input_pts, Array) + kernel! = cartesian_kernel!(CPU(), numcores) + else + kernel! = cartesian_kernel!(CUDADevice(), numthreads) + end + + kernel!(input_pts, output_pts, sigma, mu, ndrange=size(input_pts)[1]) +end + +function main() + + input_pts = create_rand_dist(4096^2,[0,1]) + output_pts = create_rand_dist(4096^2,[0,1]) + + wait(cartesian_box_muller!(input_pts, output_pts, 1, 0)) + @time wait(cartesian_box_muller!(input_pts, output_pts, 1, 0)) + wait(polar_box_muller!(input_pts, output_pts, 1, 0)) + @time wait(polar_box_muller!(input_pts, output_pts, 1, 0)) + + if has_cuda_gpu() + input_pts = create_rand_dist(4096^2,[0,1], AT = CuArray) + output_pts = create_rand_dist(4096^2,[0,1], AT = CuArray) + + wait(cartesian_box_muller!(input_pts, output_pts, 1, 0)) + CUDA.@time wait(cartesian_box_muller!(input_pts, output_pts, 1, 0)) + wait(polar_box_muller!(input_pts, output_pts, 1, 0)) + CUDA.@time wait(polar_box_muller!(input_pts, output_pts, 1, 0)) + end + +end + +main() diff --git a/contents/box_muller/code/julia/plot.gp b/contents/box_muller/code/julia/plot.gp new file mode 100644 index 000000000..7dbbf4808 --- /dev/null +++ b/contents/box_muller/code/julia/plot.gp @@ -0,0 +1,31 @@ +set terminal pngcairo +set size square + +set output "cartesian_grid.png" +p "cartesian_grid.dat" pt 7 title "" + +set output "cartesian_rand.png" +p "cartesian_rand.dat" pt 7 title "" + +set output "polar_rand.png" +p "polar_rand.dat" pt 7 
title "" + +set output "polar_grid.png" +p "polar_grid.dat" pt 7 title "" + + +set xrange[-3:3] +set yrange[-3:3] + +set output "polar_grid_output.png" +p "polar_grid_output.dat" pt 7 title "" + +set output "polar_rand_output.png" +p "polar_rand_output.dat" pt 7 title "" + +set output "cartesian_rand_output.png" +p "cartesian_rand_output.dat" pt 7 title "" + +set output "cartesian_grid_output.png" +p "cartesian_grid_output.dat" pt 7 title "" + diff --git a/contents/box_muller/res/cartesian_grid.png b/contents/box_muller/res/cartesian_grid.png new file mode 100644 index 000000000..c96144a65 Binary files /dev/null and b/contents/box_muller/res/cartesian_grid.png differ diff --git a/contents/box_muller/res/cartesian_grid_output.png b/contents/box_muller/res/cartesian_grid_output.png new file mode 100644 index 000000000..daf0151af Binary files /dev/null and b/contents/box_muller/res/cartesian_grid_output.png differ diff --git a/contents/box_muller/res/cartesian_grid_transform.png b/contents/box_muller/res/cartesian_grid_transform.png new file mode 100644 index 000000000..c5e88c484 Binary files /dev/null and b/contents/box_muller/res/cartesian_grid_transform.png differ diff --git a/contents/box_muller/res/cartesian_grid_transform.xcf b/contents/box_muller/res/cartesian_grid_transform.xcf new file mode 100644 index 000000000..b81accf3d Binary files /dev/null and b/contents/box_muller/res/cartesian_grid_transform.xcf differ diff --git a/contents/box_muller/res/cartesian_rand.png b/contents/box_muller/res/cartesian_rand.png new file mode 100644 index 000000000..8c21b32d7 Binary files /dev/null and b/contents/box_muller/res/cartesian_rand.png differ diff --git a/contents/box_muller/res/cartesian_rand_output.png b/contents/box_muller/res/cartesian_rand_output.png new file mode 100644 index 000000000..3be2945ce Binary files /dev/null and b/contents/box_muller/res/cartesian_rand_output.png differ diff --git a/contents/box_muller/res/cartesian_rand_transform.png 
b/contents/box_muller/res/cartesian_rand_transform.png new file mode 100644 index 000000000..08018bf34 Binary files /dev/null and b/contents/box_muller/res/cartesian_rand_transform.png differ diff --git a/contents/box_muller/res/grid.png b/contents/box_muller/res/grid.png new file mode 100644 index 000000000..bc4e80890 Binary files /dev/null and b/contents/box_muller/res/grid.png differ diff --git a/contents/box_muller/res/polar_grid.png b/contents/box_muller/res/polar_grid.png new file mode 100644 index 000000000..36088f773 Binary files /dev/null and b/contents/box_muller/res/polar_grid.png differ diff --git a/contents/box_muller/res/polar_grid_output.png b/contents/box_muller/res/polar_grid_output.png new file mode 100644 index 000000000..5fb774e3f Binary files /dev/null and b/contents/box_muller/res/polar_grid_output.png differ diff --git a/contents/box_muller/res/polar_grid_transform.png b/contents/box_muller/res/polar_grid_transform.png new file mode 100644 index 000000000..1335cf06a Binary files /dev/null and b/contents/box_muller/res/polar_grid_transform.png differ diff --git a/contents/box_muller/res/polar_rand.png b/contents/box_muller/res/polar_rand.png new file mode 100644 index 000000000..e4ff7ac58 Binary files /dev/null and b/contents/box_muller/res/polar_rand.png differ diff --git a/contents/box_muller/res/polar_rand_output.png b/contents/box_muller/res/polar_rand_output.png new file mode 100644 index 000000000..52944893f Binary files /dev/null and b/contents/box_muller/res/polar_rand_output.png differ diff --git a/contents/box_muller/res/polar_rand_transform.png b/contents/box_muller/res/polar_rand_transform.png new file mode 100644 index 000000000..aeefa3e4d Binary files /dev/null and b/contents/box_muller/res/polar_rand_transform.png differ diff --git a/contents/box_muller/res/rand100.png b/contents/box_muller/res/rand100.png new file mode 100644 index 000000000..5a09a248a Binary files /dev/null and b/contents/box_muller/res/rand100.png differ diff 
--git a/contents/box_muller/res/rand1000.png b/contents/box_muller/res/rand1000.png new file mode 100644 index 000000000..94609737d Binary files /dev/null and b/contents/box_muller/res/rand1000.png differ diff --git a/contents/box_muller/res/rand10000.png b/contents/box_muller/res/rand10000.png new file mode 100644 index 000000000..62adeaa1f Binary files /dev/null and b/contents/box_muller/res/rand10000.png differ diff --git a/contents/box_muller/res/rand_dist.png b/contents/box_muller/res/rand_dist.png new file mode 100644 index 000000000..d56d8f588 Binary files /dev/null and b/contents/box_muller/res/rand_dist.png differ diff --git a/contents/box_muller/res/right_arrow.pdf b/contents/box_muller/res/right_arrow.pdf new file mode 100644 index 000000000..9105474fa Binary files /dev/null and b/contents/box_muller/res/right_arrow.pdf differ diff --git a/contents/box_muller/res/warp_divergence.png b/contents/box_muller/res/warp_divergence.png new file mode 100644 index 000000000..7b385b82c Binary files /dev/null and b/contents/box_muller/res/warp_divergence.png differ diff --git a/contents/bubble_sort/bubble_sort.md b/contents/bubble_sort/bubble_sort.md deleted file mode 100644 index 3a1bb7e9a..000000000 --- a/contents/bubble_sort/bubble_sort.md +++ /dev/null @@ -1,171 +0,0 @@ -# Bubble Sort -When it comes to sorting algorithms, Bubble Sort is usually the first that comes to mind. -Though it might not be the fastest tool in the shed, it's definitely straightforward to implement and is often the first sorting method new programmers think of when trying to implement a sorting method on their own. - -Here's how it works: we go through each element in our vector and check to see if it is larger than the element to it's right. -If it is, we swap the elements and then move to the next element. -In this way, we sweep through the array $$n$$ times for each element and continually swap any two adjacent elements that are improperly ordered. 
-This means that we need to go through the vector $$\mathcal{O}(n^2)$$ times with code similar to the following: - -{% method %} -{% sample lang="jl" %} -[import:1-10, lang:"julia"](code/julia/bubble.jl) -{% sample lang="cs" %} -[import:9-27, lang:"csharp"](code/csharp/BubbleSort.cs) -{% sample lang="c" %} -[import:10-20, lang:"c"](code/c/bubble_sort.c) -{% sample lang="c8" %} -[import:39-63, lang:"chip-8"](code/chip8/bubblesort.c8) -{% sample lang="java" %} -[import:2-12, lang:"java"](code/java/Bubble.java) -{% sample lang="kotlin" %} -[import:1-11, lang:"kotlin"](code/kotlin/BubbleSort.kt) -{% sample lang="js" %} -[import:1-12, lang:"javascript"](code/javascript/bubble.js) -{% sample lang="py" %} -[import:4-9, lang:"python"](code/python/bubblesort.py) -{% sample lang="m" %} -[import:1-13, lang:"matlab"](code/matlab/bubblesort.m) -{% sample lang="lua" %} -[import:1-9, lang="lua"](code/lua/bubble_sort.lua) -{% sample lang="hs" %} -[import, lang:"haskell"](code/haskell/bubbleSort.hs) -{% sample lang="cpp" %} -[import:13-23, lang:"cpp"](code/c++/bubblesort.cpp) -{% sample lang="rs" %} -[import:6-16, lang:"rust"](code/rust/bubble_sort.rs) -{% sample lang="d" %} -[import:3-18, lang:"d"](code/d/bubble_sort.d) -{% sample lang="go" %} -[import:7-21, lang:"go"](code/go/bubbleSort.go) -{% sample lang="racket" %} -[import:6-19, lang:"scheme"](code/racket/bubbleSort.rkt) -{% sample lang="swift" %} -[import:1-13, lang:"swift"](code/swift/bubblesort.swift) -{% sample lang="ti83b" %} -[import:2-13, lang:"ti-83_basic"](code/ti83basic/BUBLSORT.txt) -{% sample lang="ruby" %} -[import:3-13, lang:"ruby"](code/ruby/bubble.rb) -{% sample lang="crystal" %} -[import:1-11, lang:"crystal"](code/crystal/bubble.cr) -{% sample lang="php" %} -[import:4-17, lang:"php"](code/php/bubble_sort.php) -{% sample lang="lisp" %} -[import:3-28, lang:"lisp"](code/clisp/bubble_sort.lisp) -{% sample lang="nim" %} -[import:5-9, lang:"nim"](code/nim/bubble_sort.nim) -{% sample lang="st" %} -[import:2-15, 
lang:"smalltalk"](code/smalltalk/bubble.st) -{% sample lang="asm-x64" %} -[import:43-66, lang:"asm-x64"](code/asm-x64/bubble_sort.s) -{% sample lang="f90" %} -[import:19-40, lang:"fortran"](code/fortran/bubble.f90) -{% sample lang="bf" %} -[import:17-63, lang:"brainfuck"](code/brainfuck/bubblesort.bf) -{% sample lang="scala" %} -[import:3-14, lang:"scala"](code/scala/bubble_sort.scala) -{% sample lang="emojic" %} -[import:2-14, lang:"emojicode"](code/emojicode/bubble_sort.emojic) -{% sample lang="bash" %} -[import:2-21, lang:"bash"](code/bash/bubble_sort.bash) -{% sample lang="scratch" %} -

- -

-{% endmethod %} - -... And that's it for the simplest bubble sort method. -Now, as you might imagine, computer scientists have optimized this to the fiery lakes of Michigan and back, so we'll come back to this in the future and talk about how to optimize it. -For now, it's fine to just bask in the simplicity that is bubble sort. -Trust me, there are plenty of more complicated algorithms that do precisely the same thing, only much, much better (for most cases). - -## Example Code - -{% method %} -{% sample lang="jl" %} -[import, lang:"julia"](code/julia/bubble.jl) -{% sample lang="cs" %} -##### BubbleSort.cs -[import, lang:"csharp"](code/csharp/BubbleSort.cs) -##### Program.cs -[import, lang:"csharp"](code/csharp/Program.cs) -{% sample lang="c" %} -[import, lang:"c"](code/c/bubble_sort.c) -{% sample lang="c8" %} -[import, lang:"chip-8"](code/chip8/bubblesort.c8) -{% sample lang="java" %} -[import, lang:"java"](code/java/Bubble.java) -{% sample lang="kotlin" %} -[import, lang:"kotlin"](code/kotlin/BubbleSort.kt) -{% sample lang="js" %} -[import, lang:"javascript"](code/javascript/bubble.js) -{% sample lang="py" %} -[import, lang:"python"](code/python/bubblesort.py) -{% sample lang="m" %} -[import, lang:"matlab"](code/matlab/bubblesort.m) -{% sample lang="lua" %} -[import, lang="lua"](code/lua/bubble_sort.lua) -{% sample lang="hs" %} -[import, lang:"haskell"](code/haskell/bubbleSort.hs) -{% sample lang="cpp" %} -[import, lang:"cpp"](code/c++/bubblesort.cpp) -{% sample lang="rs" %} -[import, lang:"rust"](code/rust/bubble_sort.rs) -{% sample lang="d" %} -[import, lang:"d"](code/d/bubble_sort.d) -{% sample lang="go" %} -[import, lang:"go"](code/go/bubbleSort.go) -{% sample lang="racket" %} -[import, lang:"scheme"](code/racket/bubbleSort.rkt) -{% sample lang="swift" %} -[import, lang:"swift"](code/swift/bubblesort.swift) -{% sample lang="ti83b" %} -[import, lang:"ti-83_basic"](code/ti83basic/BUBLSORT.txt) -{% sample lang="ruby" %} -[import, 
lang:"ruby"](code/ruby/bubble.rb) -{% sample lang="crystal" %} -[import, lang:"crystal"](code/crystal/bubble.cr) -{% sample lang="php" %} -[import, lang:"php"](code/php/bubble_sort.php) -{% sample lang="lisp" %} -[import, lang:"lisp"](code/clisp/bubble_sort.lisp) -{% sample lang="nim" %} -[import, lang:"nim"](code/nim/bubble_sort.nim) -{% sample lang="asm-x64" %} -[import, lang:"asm-x64"](code/asm-x64/bubble_sort.s) -{% sample lang="f90" %} -[import, lang:"fortran"](code/fortran/bubble.f90) -{% sample lang="bf" %} -[import, lang:"brainfuck"](code/brainfuck/bubblesort.bf) -{% sample lang="st" %} -[import, lang:"smalltalk"](code/smalltalk/bubble.st) -{% sample lang="scala" %} -[import, lang:"scala"](code/scala/bubble_sort.scala) -{% sample lang="emojic" %} -[import, lang:"emojicode"](code/emojicode/bubble_sort.emojic) -{% sample lang="bash" %} -[import, lang:"bash"](code/bash/bubble_sort.bash) -{% sample lang="scratch" %} -The code snippet was taken from this [Scratch project](https://scratch.mit.edu/projects/316483792) -{% endmethod %} - - - -## License - -##### Code Examples - -The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). - -##### Text - -The text of this chapter was written by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). - -[

](https://creativecommons.org/licenses/by-sa/4.0/) - -##### Pull Requests - -After initial licensing ([#560](https://github.com/algorithm-archivists/algorithm-archive/pull/560)), the following pull requests have modified the text or graphics of this chapter: -- none diff --git a/contents/bubble_sort/code/asm-x64/bubble_sort.s b/contents/bubble_sort/code/asm-x64/bubble_sort.s deleted file mode 100644 index 9521ded40..000000000 --- a/contents/bubble_sort/code/asm-x64/bubble_sort.s +++ /dev/null @@ -1,106 +0,0 @@ -.intel_syntax noprefix - -.section .rodata - array: - .align 16 - .int 1, 45, 756, 4569, 56, 3, 8, 5, -10, -4 - .equ array_len, (.-array) / 4 - array_fmt: .string "%d " - lf: .string "\n" - unsorted: .string "Unsorted array: " - sorted: .string "Sorted array: " - -.section .text - .global main - .extern printf - -print_array: - push r12 - push r13 - - mov r12, rdi # Loop variable - lea r13, [rdi + 4*rsi] # Pointer after the last element - -print_array_loop: - cmp r12, r13 # If we're done iterating over the array then bail - jge print_array_done - mov rdi, OFFSET array_fmt # Otherwise print the current value - mov esi, DWORD PTR [r12] - xor rax, rax - call printf - lea r12, [r12 + 4] # And increment the loop variable pointer - jmp print_array_loop - -print_array_done: - mov rdi, OFFSET lf # Print a closing newline - xor rax, rax - call printf - - pop r13 - pop r12 - ret - -bubble_sort: - xor rcx, rcx # The outer loop counter - lea rdx, [rdi + 4*rsi - 4] # The end address for the inner loop - -outer_loop: - cmp rcx, rsi # We first check if the outer loop is done - jge bubble_sort_done # And if it is, return - mov rax, rdi # Otherwise we initialize the loop variable of the inner loop -inner_loop: - mov r8d, DWORD PTR [rax] # Load array[j] and array[j+1] through a pointer - mov r9d, DWORD PTR [rax + 4] - cmp r8d, r9d # If array[j] <= array[j+1] - jle loop_counters # Then we can skip this iteration - mov DWORD PTR [rax], r9d # Otherwise swap the values - mov 
DWORD PTR [rax + 4], r8d -loop_counters: - lea rax, [rax + 4] # First, advance the inner loop - cmp rax, rdx - jl inner_loop # And in case it's not done, repeat - inc rcx # If it is done, go back to doing the outer loop - jmp outer_loop - -bubble_sort_done: - ret - -main: - # Set up our stack - sub rsp, 40 - - # We load the array in chunks onto the stack - movaps xmm0, XMMWORD PTR [array] - movaps XMMWORD PTR [rsp], xmm0 - movaps xmm0, XMMWORD PTR [array + 16] - movaps XMMWORD PTR [rsp + 16], xmm0 - mov rax, QWORD PTR [array + 32] - mov QWORD PTR [rsp + 32], rax - - # Print the unsorted array - mov rdi, OFFSET unsorted - xor rax, rax - call printf - - mov rdi, rsp - mov rsi, array_len - call print_array - - # Sort - mov rdi, rsp - mov rsi, array_len - call bubble_sort - - # Print the sorted array - mov rdi, OFFSET sorted - xor rax, rax - call printf - - mov rdi, rsp - mov rsi, array_len - call print_array - - # Restore the stack pointer, set return value to 0 - add rsp, 40 - xor rax, rax - ret diff --git a/contents/bubble_sort/code/bash/bubble_sort.bash b/contents/bubble_sort/code/bash/bubble_sort.bash deleted file mode 100755 index 78d002760..000000000 --- a/contents/bubble_sort/code/bash/bubble_sort.bash +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash -bubble_sort() { - local i - local j - local tmp - local len - local arr - arr=("$@") - (( len = ${#arr[@]} )) - - for ((i = 0; i <= len - 1; i++)); do - for ((j = 0; j <= len - 2; j++)); do - if (( arr[j] > arr[(( j + 1 ))] )); then - (( tmp = arr[(( j + 1 ))] )) - (( arr[(( j + 1 ))] = arr[j] )) - (( arr[j] = tmp )) - fi - done - done - echo ${arr[*]} -} - -arr=(1 45 756 4569 56 3 8 5 -10 -4) -echo "Unsorted array: ${arr[*]}" -tmp=$(bubble_sort "${arr[@]}") -echo "Sorted array: ${tmp[*]}" diff --git a/contents/bubble_sort/code/brainfuck/bubblesort.bf b/contents/bubble_sort/code/brainfuck/bubblesort.bf deleted file mode 100644 index c51c04e0c..000000000 --- a/contents/bubble_sort/code/brainfuck/bubblesort.bf 
+++ /dev/null @@ -1,72 +0,0 @@ -make some extra space for a marker ->>> - -build array ->>++++ ->>++++++ ->>+++++ ->>+++ ->>+++++++ ->>+ ->>+++ ->>++ - -move to starting point ->>> - -set loop marker to 1 and starts loop -+ -[ - -delete loop marker --<<<<< - -if there is a number here -[ - -add it to the empty space to the right -and subtract it from the next number -[->+>-<<] - -undo subtraction once without a zero check in case the numbers are equal -+>>+<- - -then as long as the left number is bigger than zero -[<+>> - -if the number to the right becomes a zero in the process -due to buffer overflow set a swap marker -[<<<]<<[<<<<<+>>] - -otherwise continue resetting numbers ->>>>>+<-] - -set a "correct" marker between numbers -+ - -check swap marker -<<<<<< - -if swap marker is set delete "correct" marker -[->>>>>>-> - -and swap the numbers -[-<+>]<<[->>+<<]>[-<+>] - -go to next pair -<<<<<<]>>> - -repeat until end of array -] - -go to leftmost "correct" marker ->>> - -delete marker and jump to the next one if it's 1 -[->>] - -else delete all markers and set repetition marker ->[>[-]>>+<]>] - -program stops three places to the right of the -sorted array diff --git a/contents/bubble_sort/code/c++/bubblesort.cpp b/contents/bubble_sort/code/c++/bubblesort.cpp deleted file mode 100644 index 801b5d108..000000000 --- a/contents/bubble_sort/code/c++/bubblesort.cpp +++ /dev/null @@ -1,35 +0,0 @@ -#include -#include -#include -#include - -template -void print_range(Iter first, Iter last) { - for (auto it = first; it != last; ++it) - std::cout << *it << " "; - std::cout << std::endl; -} - -template -void bubble_sort(Iter first, Iter last) { - for (auto it1 = first; it1 != last; ++it1) { - for (auto it2 = first; it2 + 1 != last; ++it2) { - // these are unsorted! 
gotta swap 'em - if (*(it2 + 1) < *it2) { - std::iter_swap(it2, it2 + 1); - } - } - } -} - -int main() { - int input[] = {1, 45, 756, 4569, 56, 3, 8, 5, -10, -4}; - - std::cout << "Unsorted array:\n"; - print_range(std::begin(input), std::end(input)); - - bubble_sort(std::begin(input), std::end(input)); - - std::cout << "\nSorted array:\n"; - print_range(std::begin(input), std::end(input)); -} diff --git a/contents/bubble_sort/code/c/bubble_sort.c b/contents/bubble_sort/code/c/bubble_sort.c deleted file mode 100644 index 2cccca129..000000000 --- a/contents/bubble_sort/code/c/bubble_sort.c +++ /dev/null @@ -1,35 +0,0 @@ -#include - -void print_range(int *array, size_t n) { - for (size_t i = 0; i < n; ++i) { - printf("%d ", array[i]); - } - printf("\n"); -} - -void bubble_sort(int *array, size_t n) { - for (size_t i = 0; i < n; ++i) { - for (size_t j = 0; j < n - 1; ++j) { - if (array[j] > array[j + 1]) { - int tmp = array[j]; - array[j] = array[j + 1]; - array[j + 1] = tmp; - } - } - } -} - -int main() { - int array[] = {1, 45, 756, 4569, 56, 3, 8, 5, -10, -4}; - size_t N = sizeof(array) / sizeof(*array); - - printf("Unsorted array:\n"); - print_range(array, N); - - bubble_sort(array, N); - - printf("\nSorted array:\n"); - print_range(array, N); - - return 0; -} diff --git a/contents/bubble_sort/code/chip8/bubblesort.c8 b/contents/bubble_sort/code/chip8/bubblesort.c8 deleted file mode 100644 index 8aea23d10..000000000 --- a/contents/bubble_sort/code/chip8/bubblesort.c8 +++ /dev/null @@ -1,70 +0,0 @@ -;chip 8 bubble sort -;by codingwithethanol - - - LD V2, 1 ;index increment - LD I, ARRAY ;get array pointer - LD V3, 12 ;our array will be 12 bytes long - -FILL - RND V0, #0F ;get random byte value from 0-F - LD [I], V0 ;load it to current array index - ADD I, V2 ;increment i - ADD V3, -1 ;decrement counter - SE V3, #0 ;check if counter has reached zero - JP FILL ;if it hasnt, loop - LD V3, 2 ;x position to print array to - LD V4, 4 ;y position to print array to - CALL 
DISPLAY ;display array - CALL BUBBLESORT ;sort array -HERE - JP HERE ;block - -DISPLAY - LD V5, V3 ;save original x offset - ADD V5, 60 ;add length of array times char width - LD V2, 0 ;array index -DRAWLOOP - LD I, ARRAY ;get array pointer - ADD I, V2 ;add index - ADD V2, 1 ;increment index - LD V0, [I] ;load element of array - LD F, V0 ;get address of corresponding hex sprite - DRW V3, V4, 5 ;draw it at (V3, V4) - ADD V3, 5 ;increment x by char width - SE V3, V5 ;if we arent at the end of the array - JP DRAWLOOP ;draw another char - RET - -BUBBLESORT -SETUP - LD V4, 0 ;no swap has been performed - LD V3, 0 ;we are starting at the beginning of the array -CHECKLOOP - LD I, ARRAY ;load array pointer - ADD I, V3 ;load array index - LD V1, [I] ;load 2 bytes from arraypos - LD V2, V1 ;temp = b - SUB V2, V0 ;temp -= a - SE VF, 0 ;if b < a - JP GRTHAN ;jump here -LSTHAN - LD V2, V1 ;temp = b - LD V1, V0 ;b = a - LD V0, V2 ;a = temp - LD [I], V1 ;store back swapped values - LD V4, 1 ;swap = true -GRTHAN - ADD V3, 1 ;increment array index - SE V3, 12 ;if not end of array - JP CHECKLOOP ;step -CHECKDONE ;if end of array - SE V4, 0 ;if a swap was done - JP SETUP ;iterate through array again - ;otherwise - LD V3, 2 ;x position to print array to - LD V4, 12 ;y position to print array to - CALL DISPLAY ;display sorted array - RET - -ARRAY diff --git a/contents/bubble_sort/code/clisp/bubble_sort.lisp b/contents/bubble_sort/code/clisp/bubble_sort.lisp deleted file mode 100644 index 0379355f8..000000000 --- a/contents/bubble_sort/code/clisp/bubble_sort.lisp +++ /dev/null @@ -1,33 +0,0 @@ -;;;; Bubble sort implementation - -(defun bubble-up (list) - (if - (< (length list) 2) - list - (if - (> (first list) (second list)) - (cons - (second list) - (bubble-up - (cons - (first list) - (rest (rest list))))) - (cons - (first list) - (bubble-up - (rest list)))))) - -(defun bubble-sort (list) - (if - (< (length list) 2) - list - (let* ((new-list (bubble-up list))) - (append - (bubble-sort 
(butlast new-list)) - (last new-list))))) - -;; The built-in sort: (sort (list 5 4 3 2 1) #'<) -(print - (bubble-sort (list 5 4 3 2 1))) -(print - (bubble-sort (list 1 2 3 3 2 1))) diff --git a/contents/bubble_sort/code/crystal/bubble.cr b/contents/bubble_sort/code/crystal/bubble.cr deleted file mode 100755 index 187a38744..000000000 --- a/contents/bubble_sort/code/crystal/bubble.cr +++ /dev/null @@ -1,20 +0,0 @@ -def bubble_sort(arr) - arr = arr.dup - (0 ... arr.size).each do - (0 ... arr.size-1).each do |k| - if arr[k] > arr[k+1] - arr[k+1],arr[k] = arr[k],arr[k+1] - end - end - end - arr -end - -def main - number = 10.times.map{rand(0..1_000)}.to_a - pp "The array before sorting is #{number}" - number = bubble_sort number - pp "The array after sorting is #{number}" -end - -main diff --git a/contents/bubble_sort/code/csharp/BubbleSort.cs b/contents/bubble_sort/code/csharp/BubbleSort.cs deleted file mode 100644 index 244397b40..000000000 --- a/contents/bubble_sort/code/csharp/BubbleSort.cs +++ /dev/null @@ -1,29 +0,0 @@ -// submitted by Julian Schacher (jspp) -using System; -using System.Collections.Generic; - -namespace BubbleSort -{ - public static class BubbleSort - { - public static List RunBubbleSort(List list) where T : IComparable - { - var length = list.Count; - - for (int i = 0; i < length; i++) - { - for (int j = 1; j < length; j++) - { - if (list[j - 1].CompareTo(list[j]) > 0) - { - var temp = list[j - 1]; - list[j - 1] = list[j]; - list[j] = temp; - } - } - } - - return list; - } - } -} diff --git a/contents/bubble_sort/code/csharp/Program.cs b/contents/bubble_sort/code/csharp/Program.cs deleted file mode 100644 index 19fbc94a6..000000000 --- a/contents/bubble_sort/code/csharp/Program.cs +++ /dev/null @@ -1,24 +0,0 @@ -// submitted by Julian Schacher (jspp) -using System; -using System.Collections.Generic; - -namespace BubbleSort -{ - class Program - { - static void Main(string[] args) - { - Console.WriteLine("BubbleSort"); - var listBubble = new 
List() { 1, 2, 6, 4, 9, 54, 3, 2, 7, 15 }; - Console.Write("unsorted: "); - foreach (var number in listBubble) - Console.Write(number + " "); - Console.WriteLine(); - listBubble = BubbleSort.RunBubbleSort(listBubble); - Console.Write("sorted: "); - foreach (var number in listBubble) - Console.Write(number + " "); - Console.WriteLine(); - } - } -} diff --git a/contents/bubble_sort/code/d/bubble_sort.d b/contents/bubble_sort/code/d/bubble_sort.d deleted file mode 100644 index 7ed957391..000000000 --- a/contents/bubble_sort/code/d/bubble_sort.d +++ /dev/null @@ -1,31 +0,0 @@ -import std.range : hasAssignableElements, isRandomAccessRange, hasLength; - -void bubbleSort(R)(ref R range) -if (isRandomAccessRange!R && hasAssignableElements!R && hasLength!R) -{ - import std.algorithm : swap; - - foreach (i; 0 .. range.length) { - bool isSorted = true; - foreach (j; 0 .. range.length - 1) - if (range[j + 1] < range[j]) { - swap(range[j + 1], range[j]); - isSorted = false; - } - if (isSorted) - return; - } -} - -void main() @safe -{ - import std.stdio : writefln; - import std.range : generate, take; - import std.array : array; - import std.random : uniform01; - - auto input = generate!(() => uniform01!float).take(10).array; - writefln!"before sorting:\n%s"(input); - bubbleSort(input); - writefln!"after sorting:\n%s"(input); -} diff --git a/contents/bubble_sort/code/emojicode/bubble_sort.emojic b/contents/bubble_sort/code/emojicode/bubble_sort.emojic deleted file mode 100644 index 491d7b734..000000000 --- a/contents/bubble_sort/code/emojicode/bubble_sort.emojic +++ /dev/null @@ -1,31 +0,0 @@ -🐇 🥇 🍇 - 🐇 ❗️ 🛁 numbers 🍨🐚💯🍆 🍇 - 🐔 numbers❗️ ➡️ count - - 🔂 i 🆕⏩⏩ 0 count❗️ 🍇 - 🔂 j 🆕⏩⏩ 1 count❗️ 🍇 - ↪️ 🐽 numbers j ➖ 1❗️ ▶️ 🐽 numbers j❗️ 🍇 - 🐽 numbers j ➖ 1❗️ ➡️ temp - 🐷 numbers j ➖ 1 🐽 numbers j❗️❗️ - 🐷 numbers j temp❗️ - 🍉 - 🍉 - 🍉 - 🍉 -🍉 - -🏁 🍇 - 🍨 1.7 -3.0 2.5 2.0 -6.0 4.4 50.0 7.0 1.5 -4.3 0.3 🍆 ➡️ numbers - - 😀 🔤unordered:🔤❗️ - 🔂 number numbers 🍇 - 😀 🔡 number 10❗️❗️ - 🍉 - - 🛁🐇🥇 
numbers❗️ - - 😀 🔤ordered:🔤❗️ - 🔂 number numbers 🍇 - 😀 🔡 number 10❗️❗️ - 🍉 -🍉 diff --git a/contents/bubble_sort/code/fortran/bubble.f90 b/contents/bubble_sort/code/fortran/bubble.f90 deleted file mode 100644 index fb903f0d3..000000000 --- a/contents/bubble_sort/code/fortran/bubble.f90 +++ /dev/null @@ -1,42 +0,0 @@ -PROGRAM main - - IMPLICIT NONE - REAL(8), DIMENSION(10) :: A - - A = (/ 1d0, 3d0, 2d0, 4d0, 5d0, 10d0, 50d0, 7d0, 1.5d0, 0.3d0 /) - - WRITE(*,*) 'Input vector' - WRITE(*,'( F6.2 )') A - WRITE(*,*) ' ' - - CALL bubblesort(A) - - WRITE(*,*) 'Output vector' - WRITE(*,'(F6.2)') A - -CONTAINS - -SUBROUTINE bubblesort(array) - - IMPLICIT NONE - INTEGER :: array_length, i, j, n - REAL(8) :: tmp - REAL(8), DIMENSION(:), INTENT(INOUT) :: array - - array_length = size(array) - n = array_length - - DO i=1, n - DO j=1, n-1 - IF ( array(j) > array(j+1) ) THEN - - tmp = array(j+1) - array(j+1) = array(j) - array(j) = tmp - - END IF - END DO - END DO -END SUBROUTINE bubblesort - -END PROGRAM main diff --git a/contents/bubble_sort/code/go/bubbleSort.go b/contents/bubble_sort/code/go/bubbleSort.go deleted file mode 100644 index df620fe5c..000000000 --- a/contents/bubble_sort/code/go/bubbleSort.go +++ /dev/null @@ -1,30 +0,0 @@ -// Submitted by Chinmaya Mahesh (chin123) - -package main - -import "fmt" - -func bubbleSort(array []int) { - n := len(array) - for i := 0; i < n-1; i++ { - swapped := false - for j := 0; j < n-i-1; j++ { - if array[j] > array[j+1] { - array[j], array[j+1] = array[j+1], array[j] - swapped = true - } - } - if !swapped { - break - } - } -} - -func main() { - array := [10]int{1, 45, 756, 4569, 56, 3, 8, 5, -10, -4} - fmt.Println("Unsorted array:", array) - - bubbleSort(array[:]) - - fmt.Println("Sorted array:", array) -} diff --git a/contents/bubble_sort/code/haskell/bubbleSort.hs b/contents/bubble_sort/code/haskell/bubbleSort.hs deleted file mode 100644 index 50e2308a5..000000000 --- a/contents/bubble_sort/code/haskell/bubbleSort.hs +++ /dev/null @@ 
-1,6 +0,0 @@ -bubbleSort :: (Ord a) => [a] -> [a] -bubbleSort x = (!!length x) $ iterate bubble x - where bubble (x:y:r) - | x <= y = x : bubble (y:r) - | otherwise = y : bubble (x:r) - bubble x = x diff --git a/contents/bubble_sort/code/java/Bubble.java b/contents/bubble_sort/code/java/Bubble.java deleted file mode 100644 index 3ece7c965..000000000 --- a/contents/bubble_sort/code/java/Bubble.java +++ /dev/null @@ -1,30 +0,0 @@ -public class Bubble { - static void bubbleSort(int[] arr) { - for (int r = arr.length - 1; r > 0; r--) { - for (int i = 0; i < r; i++) { - if(arr[i] > arr[i + 1]) { - int tmp = arr[i]; - arr[i] = arr[i + 1]; - arr[i + 1] = tmp; - } - } - } - } - - public static void main(String[] args) { - int[] test = new int[]{20, -3, 50, 1, -6, 59}; - - System.out.println("Unsorted array :"); - for (int i = 0; i < test.length; i++) { - System.out.print(test[i] + " "); - } - - bubbleSort(test); - - System.out.println("\n\nSorted array :"); - for (int i = 0; i < test.length; i++) { - System.out.print(test[i] + " "); - } - System.out.println(""); - } -} diff --git a/contents/bubble_sort/code/javascript/bubble.js b/contents/bubble_sort/code/javascript/bubble.js deleted file mode 100644 index f1f36d971..000000000 --- a/contents/bubble_sort/code/javascript/bubble.js +++ /dev/null @@ -1,20 +0,0 @@ -function bubbleSort(arr) { - let tmp; - for (let i = 0; i < arr.length; i++) { - for (let k = 0; k < arr.length - 1; k++) { - if (arr[k] > arr[k + 1]) { - tmp = arr[k]; - arr[k] = arr[k + 1]; - arr[k + 1] = tmp; - } - } - } -} - -function main() { - const testArray = [1, 3, 2, 4, 5, 10, 50, 7, 1.5, 0.3]; - bubbleSort(testArray); - console.log(testArray); -} - -main(); diff --git a/contents/bubble_sort/code/julia/bubble.jl b/contents/bubble_sort/code/julia/bubble.jl deleted file mode 100644 index d4c970187..000000000 --- a/contents/bubble_sort/code/julia/bubble.jl +++ /dev/null @@ -1,19 +0,0 @@ -function bubble_sort!(a::Vector{Float64}) - n = length(a) - for i = 1:n - 
for j = 1:n-1 - if(a[j] < a[j+1]) - a[j], a[j+1] = a[j+1], a[j] - end - end - end -end - - -function main() - a = [1., 3, 2, 4, 5, 10, 50, 7, 1.5, 0.3] - bubble_sort!(a) - println(a) -end - -main() diff --git a/contents/bubble_sort/code/kotlin/BubbleSort.kt b/contents/bubble_sort/code/kotlin/BubbleSort.kt deleted file mode 100644 index 8cfa611da..000000000 --- a/contents/bubble_sort/code/kotlin/BubbleSort.kt +++ /dev/null @@ -1,18 +0,0 @@ -fun bubbleSort(input: MutableList) { - for (i in (input.size - 1) downTo 0) { - for (j in 0 until i) { - if (input[j] > input[j + 1]) { - input[j] = input[j + 1].also { - input[j + 1] = input[j] - } - } - } - } -} - -fun main(args: Array) { - var list = mutableListOf(4, 2, 9, 20, 11, 30, 1, 0); - println("Original $list") - bubbleSort(list) - println("Sorted $list") -} diff --git a/contents/bubble_sort/code/lua/bubble_sort.lua b/contents/bubble_sort/code/lua/bubble_sort.lua deleted file mode 100644 index 54476bfa5..000000000 --- a/contents/bubble_sort/code/lua/bubble_sort.lua +++ /dev/null @@ -1,16 +0,0 @@ -function bubble_sort(arr) - for i = 1,#arr-1 do - for j = 1,#arr-1 do - if arr[j] > arr[j+1] then - arr[j], arr[j+1] = arr[j+1], arr[j] - end - end - end -end - -local arr = {1, 45, 756, 4569, 56, 3, 8, 5, -10, -4} -print(("Unsorted array: {%s}"):format(table.concat(arr,", "))) - -bubble_sort(arr) - -print(("Sorted array: {%s}"):format(table.concat(arr,", "))) diff --git a/contents/bubble_sort/code/matlab/bubblesort.m b/contents/bubble_sort/code/matlab/bubblesort.m deleted file mode 100644 index 4c7378a48..000000000 --- a/contents/bubble_sort/code/matlab/bubblesort.m +++ /dev/null @@ -1,25 +0,0 @@ -function sorted_array = bubblesort(array) - for i = 1 : length(array) - for j = 1 : length(array) - i - if array(j) > array(j+1) - % swap elements in the list - temp = array(j); - array(j) = array(j+1); - array(j+1) = temp; - end - end - end - sorted_array = array; -end - -function main() - array = floor(rand(1, 7) * 100); - 
disp('Before Sorting:') - disp(array) - - array = bubble_sort(array); - disp('After Sorting:') - disp(array) -end - -main() diff --git a/contents/bubble_sort/code/nim/bubble_sort.nim b/contents/bubble_sort/code/nim/bubble_sort.nim deleted file mode 100644 index ad7c58d08..000000000 --- a/contents/bubble_sort/code/nim/bubble_sort.nim +++ /dev/null @@ -1,16 +0,0 @@ -proc print_array(a: openArray[int]) = - for n in 0 .. < len(a): - echo a[n] - -proc bubble_sort(a: var openArray[int]) = - for i in 0 .. < len(a) - 1: - for j in 0 .. < len(a) - 1: - if a[j + 1] < a[j]: - swap(a[j], a[j + 1]) - -var x: array[10,int] = [32, 32, 64, 16, 128, 8, 256, 4, 512, 2] -echo "Unsorted:" -print_array(x) -echo "\nSorted:" -bubble_sort(x) -print_array(x) diff --git a/contents/bubble_sort/code/php/bubble_sort.php b/contents/bubble_sort/code/php/bubble_sort.php deleted file mode 100644 index cb7ea77e9..000000000 --- a/contents/bubble_sort/code/php/bubble_sort.php +++ /dev/null @@ -1,25 +0,0 @@ - $arr[$j]) { - $tmp = $arr[$j - 1]; - $arr[$j - 1] = $arr[$j]; - $arr[$j] = $tmp; - } - } - } - - return $arr; -} - -$unsorted = [1, 2, 6, 47, 4, 9, 3, 7, 8, 23, 15]; -$bubble_sorted = bubble_sort($unsorted); - -printf('Unsorted: %s', implode(',', $unsorted)); -echo PHP_EOL; -printf('Sorted: %s', implode(',', $bubble_sorted)); -echo PHP_EOL; diff --git a/contents/bubble_sort/code/python/bubblesort.py b/contents/bubble_sort/code/python/bubblesort.py deleted file mode 100644 index 18485a73c..000000000 --- a/contents/bubble_sort/code/python/bubblesort.py +++ /dev/null @@ -1,17 +0,0 @@ -import random - - -def bubble_sort(array): - len_array = len(array) - for i in range(len_array): - for j in range(len_array - i - 1): - if(array[j] > array[j+1]): - array[j], array[j+1] = array[j+1], array[j] #swap elements in the list - -def main(): - random_array = [random.randint(0, 1000) for _ in range(10)] - print("Before Sorting {}".format(random_array)) - bubble_sort(random_array) - print("After Sorting 
{}".format(random_array)) - -main() diff --git a/contents/bubble_sort/code/racket/bubbleSort.rkt b/contents/bubble_sort/code/racket/bubbleSort.rkt deleted file mode 100644 index 320cddc78..000000000 --- a/contents/bubble_sort/code/racket/bubbleSort.rkt +++ /dev/null @@ -1,21 +0,0 @@ -#lang racket - -(provide bubbleSort) - - -(define bubbleSort - (case-lambda [(l) (bubbleSort l (length l))] - [(l n) (if (= n 1) - l - (bubbleSort (pass l 0 n) (- n 1)))])) - -; a single pass, if this is the nth pass, then we know that the (n - 1) last elements are already sorted -(define (pass l counter n) - (let ([x (first l)] - [y (second l)] - [r (drop l 2)]) - (cond [(= (- n counter) 2) (cons (min x y) (cons (max x y) r))] - [(cons (min x y) (pass (cons (max x y) r) (+ counter 1) n))]))) - - -((lambda (x) (display (bubbleSort x))) (read)) diff --git a/contents/bubble_sort/code/ruby/bubble.rb b/contents/bubble_sort/code/ruby/bubble.rb deleted file mode 100644 index 0511d06cc..000000000 --- a/contents/bubble_sort/code/ruby/bubble.rb +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env ruby - -def bubble_sort(arr) - (0..arr.length - 1).each do - (0..arr.length - 2).each do |k| - if arr[k] > arr[k + 1] - arr[k + 1], arr[k] = arr[k], arr[k + 1] - end - end - end - - return arr -end - -def main - range = [200, 79, 69, 45, 32, 5, 15, 88, 620, 125] - puts "The range before sorting is #{range}" - range = bubble_sort(range) - puts "The range after sorting is #{range}" -end - -main() diff --git a/contents/bubble_sort/code/rust/bubble_sort.rs b/contents/bubble_sort/code/rust/bubble_sort.rs deleted file mode 100644 index 104e43783..000000000 --- a/contents/bubble_sort/code/rust/bubble_sort.rs +++ /dev/null @@ -1,27 +0,0 @@ -extern crate rand; // External crate that provides random number generation tools - -use rand::distributions::Uniform; // Used for a uniform distribution -use rand::{thread_rng, Rng}; // Used for random number generation - -fn bubble_sort(a: &mut [u32]) { - let n = a.len(); - - 
for _ in 0..n { - for j in 1..n { - if a[j - 1] > a[j] { - a.swap(j, j - 1); - } - } - } -} - -fn main() { - let mut rng = thread_rng(); // Create random number generator - let num_range = Uniform::new_inclusive(0, 10000); // Obtain uniform distribution of range [0, 10000] - let mut rand_vec: Vec = rng.sample_iter(&num_range).take(10).collect(); - // Generates random values over that range, take 10 values from it and collect in vector - - println!("Before sorting: {:?}", rand_vec); - bubble_sort(&mut rand_vec); - println!("After sorting: {:?}", rand_vec); -} diff --git a/contents/bubble_sort/code/scala/bubble_sort.scala b/contents/bubble_sort/code/scala/bubble_sort.scala deleted file mode 100644 index 534a98eac..000000000 --- a/contents/bubble_sort/code/scala/bubble_sort.scala +++ /dev/null @@ -1,22 +0,0 @@ -object BubbleSort { - - def bubbleDown(list: List[Int]): List[Int] = - list match { - case a :: b :: tail if a < b => b :: bubbleDown(a :: tail) - case a :: b :: tail => a :: bubbleDown(b :: tail) - case _ => list - } - - def bubbleSort(list: List[Int]): List[Int] = - bubbleDown(list) match { - case unsorted :+ smallest => smallest :: bubbleDown(unsorted) - case _ => list - } - - def main(args: Array[String]): Unit = { - val unsorted = List(9, 2, 0, 5, 3, 8, 1, 9, 4, 0, 7, 0, 9, 9, 0) - - println("Unsorted list is " + unsorted) - println(" Sorted list is " + bubbleSort(unsorted)) - } -} diff --git a/contents/bubble_sort/code/scratch/bubble_sort.svg b/contents/bubble_sort/code/scratch/bubble_sort.svg deleted file mode 100644 index 333bf828f..000000000 --- a/contents/bubble_sort/code/scratch/bubble_sort.svg +++ /dev/null @@ -1,116 +0,0 @@ - - -defineBubbleSortsetito1setjto1repeatlengthofarepeatlengthofa-1Ifitemj+1ofa<itemjofathensettmptoitemjofareplaceitemjofawithitemj+1ofareplaceitemj+1ofawithtmpchangejby1setjto1changeiby1 \ No newline at end of file diff --git a/contents/bubble_sort/code/scratch/bubble_sort.txt 
b/contents/bubble_sort/code/scratch/bubble_sort.txt deleted file mode 100644 index 00c4f286b..000000000 --- a/contents/bubble_sort/code/scratch/bubble_sort.txt +++ /dev/null @@ -1,15 +0,0 @@ -define BubbleSort -set [i v] to (1) -set [j v] to (1) -repeat (length of [a v]) - repeat ((length of [a v]) - (1)) - If <(item ((j)+(1)) of [a v]) < (item (j) of [a v])> then - set [tmp v] to (item (j) of [a v]) - replace item (j) of [a v] with (item ((j)+(1)) of [a v] - replace item ((j)+(1)) of [a v] with (tmp) - end - change [j v] by (1) - end -set [j v] to (1) -change [i v] by (1) -end diff --git a/contents/bubble_sort/code/smalltalk/bubble.st b/contents/bubble_sort/code/smalltalk/bubble.st deleted file mode 100644 index 70dbaca1c..000000000 --- a/contents/bubble_sort/code/smalltalk/bubble.st +++ /dev/null @@ -1,18 +0,0 @@ -"Add this method to the SequenceableCollection class in the browser:" -SequenceableCollection>>bubbleSort - "Bubble sort for a collection." - | len swapper thisElem nextElem | - len := self size. - 1 to: len - 1 do: [ :iteration | - 1 to: len - 1 do: [ :index | - thisElem := self at: index. - nextElem := self at: index + 1. - (thisElem > nextElem) ifTrue: [ - self at: thisIndex + 1 put: thisElem. - self at: thisIndex put: nextElem. - ] - ] - ] - -"Then run this anywhere in your code: " -#(4 3 2 1 6 5) bubbleSort "outputs: #(1 2 3 4 5 6)" diff --git a/contents/bubble_sort/code/swift/bubblesort.swift b/contents/bubble_sort/code/swift/bubblesort.swift deleted file mode 100644 index 3552f2125..000000000 --- a/contents/bubble_sort/code/swift/bubblesort.swift +++ /dev/null @@ -1,20 +0,0 @@ -func bubbleSort(sortArray: inout [Int]) -> [Int] { - for i in (1.. 
sortArray[j+1] { - let temp = sortArray[j] - sortArray[j] = sortArray[j + 1] - sortArray[j + 1] = temp - } - } - } - - return sortArray -} - -func main() { - var testArray = [4,5,123,759,-132,8940,24,34,-5] - print(bubbleSort(sortArray: &testArray)) -} - -main() diff --git a/contents/bubble_sort/code/ti83basic/BUBLSORT.txt b/contents/bubble_sort/code/ti83basic/BUBLSORT.txt deleted file mode 100644 index 6a735472e..000000000 --- a/contents/bubble_sort/code/ti83basic/BUBLSORT.txt +++ /dev/null @@ -1,14 +0,0 @@ -:"SUBMITTED BY GIBUS WEARING BRONY" -:"L IS LENGTH OF LIST" -:dim(L1)→L -:For(A,1,L) -:For(X,1,L-1) -:If L1(X)>L1(X+1) -:Then -:L1(X)→Y -:L1(X+1)→L1(X) -:Y→L1(X+1) -:End -:End -:End -:Disp L1 diff --git a/contents/cc/license.txt b/contents/cc/license.txt index d4db0107c..1e11bba93 100644 --- a/contents/cc/license.txt +++ b/contents/cc/license.txt @@ -3,7 +3,7 @@ ##### Code Examples -The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). ##### Text @@ -12,7 +12,7 @@ The text of this chapter was written by [James Schloss](https://github.com/leios [

](https://creativecommons.org/licenses/by-sa/4.0/) ##### Images/Graphics -- The image "[example Image](res/example.png)" was created by [James Schloss](https://github.com/leios) and is licenced under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[example Image](res/example.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). ##### Pull Requests diff --git a/contents/chans_algorithm/chans_algorithm.md b/contents/chans_algorithm/chans_algorithm.md index 62e125114..18daaa889 100644 --- a/contents/chans_algorithm/chans_algorithm.md +++ b/contents/chans_algorithm/chans_algorithm.md @@ -10,7 +10,7 @@ MathJax.Hub.Queue(["Typeset",MathJax.Hub]); ##### Code Examples -The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). ##### Text diff --git a/contents/choosing_a_language/choosing_a_language.md b/contents/choosing_a_language/choosing_a_language.md index 88ad8c986..5385c309a 100644 --- a/contents/choosing_a_language/choosing_a_language.md +++ b/contents/choosing_a_language/choosing_a_language.md @@ -73,7 +73,7 @@ Please let me know which languages you want to cover and I'll add them here! ##### Code Examples -The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). 
##### Text diff --git a/contents/code_reviews/code_reviewers.md b/contents/code_reviews/code_reviewers.md new file mode 100644 index 000000000..e2fd22b0a --- /dev/null +++ b/contents/code_reviews/code_reviewers.md @@ -0,0 +1,70 @@ +# Code Reviewers + +If you are comfortable reviewing a language, please add yourself to the table below: + +[//]: # (To add yourself to the table, please add a link at the bottom of the article and reference yourself in the following format next to each language [@username]. If there are multiple usernames for a language, make sure to seperate them with commas) + +Language | Reviewers +-|- +asm-x64 | +bash | +c | [@amaras] +c# | +clojure | +coconut | [@amaras] +c++ | [@ShadowMitia] +crystal | +D | +dart | +elm | +emojicode | +factor | +fortran | [@leios] +gnuplot | [@leios] +go | +haskell | +java | +javascript | [@ntindle],[@ShadowMitia] +julia | [@leios] +kotlin | +labview | +lolcode | +lisp | +lua | +matlab | +nim | +ocaml | [@ShadowMitia] +php | +piet | +powershell | +python | [@ntindle],[@ShadowMitia],[@amaras],[@PeanutbutterWarrior] +r | +racket | +ruby | +rust | [@ShadowMitia],[@PeanutbutterWarrior] +scala | +scheme | +scratch | [@leios] +smalltask | +swift | +typescript | [@ntindle] +v | +viml | +whitespace | + +If you are comfortable reviewing a toolchain change, please add yourself to the list below: + +Feature | Reviewers +-|- +Dev Container | [@ntindle], [@ShadowMitia] +Docker | [@ntindle], [@ShadowMitia] +Github Actions | [@ntindle] +Honkit | +Scons | [@amaras],[@PeanutbutterWarrior] +Chapters | [@leios] + +[@leios]: https://github.com/leios +[@ntindle]: https://github.com/ntindle +[@amaras]: https://github.com/amaras +[@ShadowMitia]: https://github.com/ShadowMitia +[@PeanutbutterWarrior]: https://github.com/PeanutbutterWarrior diff --git a/contents/compiled_languages/compiled_languages.md b/contents/compiled_languages/compiled_languages.md index 3f6424359..b8bac2e75 100644 --- 
a/contents/compiled_languages/compiled_languages.md +++ b/contents/compiled_languages/compiled_languages.md @@ -35,7 +35,7 @@ I just find it easier to avoid GUI's whenever possible. ##### Code Examples -The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). ##### Text diff --git a/contents/computational_geometry/computational_geometry.md b/contents/computational_geometry/computational_geometry.md index a99c0a4e9..bcd702fd7 100644 --- a/contents/computational_geometry/computational_geometry.md +++ b/contents/computational_geometry/computational_geometry.md @@ -12,7 +12,7 @@ MathJax.Hub.Queue(["Typeset",MathJax.Hub]); ##### Code Examples -The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). ##### Text diff --git a/contents/computer_graphics/computer_graphics.md b/contents/computer_graphics/computer_graphics.md new file mode 100644 index 000000000..72e170c46 --- /dev/null +++ b/contents/computer_graphics/computer_graphics.md @@ -0,0 +1,17 @@ +# Computer Graphics + +Of all areas of computer science research, none have had more of an immediate impact on multimedia than computer graphics. +This sub-field is distinctly different than computational science in that it focuses on the *appearance* of realistic details, instead of computing those details precisely. 
+Where a computational scientist might spend years writing software that runs on the fastest computers known to man to simulate climate, the computer graphics researcher might apply machine learning to create fluid simulations that look good enough to the untrained eye. +In the end, the computational scientist will have a plot and the computer graphics researcher will have a beautifully rendered simulation. + +Though I may have painted computer graphics to be a bit hand-wavey, that could not be further from the truth! +Instead, I would argue that this field of research provides the closest approximation to realistic visualizations that desktop hardware can currently support. +Many art and video game studios are interested in telling a complete story via computational media, and this simply would not be possible without the rigorous efforts of researchers from around the world. +This is why Pixar hires researchers and will actively publish their findings after their movies are released. + +Though the boundary between computer science research fields is a bit vague, for the purposes of the Algorithm Archive, we will broadly classify computer graphics as anything with direct applications to images or fields that can be represented as images. +Convolutions, for example, would not be considered part of computer graphics because they are used widely in all areas of computer science research; however, Canny edge detection will be. +We will also be covering a wide range of applications that are used for rendering high-resolution graphics and computational art. + +As with all sections to the Algorithm Archive, this is a work in progress and subject to change, so feel free to let me know what you think! 
diff --git a/contents/computus/code/c/gauss_easter.c b/contents/computus/code/c/gauss_easter.c new file mode 100644 index 000000000..e168179a2 --- /dev/null +++ b/contents/computus/code/c/gauss_easter.c @@ -0,0 +1,68 @@ +#include <stdio.h> + +char *computus(int year, int servois, char *out, size_t out_size) { + // Year's position on the 19 year metonic cycle + int a = year % 19; + + // Century index + int k = year / 100; + + //Shift of metonic cycle, add a day offset every 300 years + int p = (13 + 8 * k) / 25; + + // Correction for non-observed leap days + int q = k / 4; + + // Correction to starting point of calculation each century + int M = (15 - p + k - q) % 30; + + // Number of days from March 21st until the full moon + int d = (19 * a + M) % 30; + + // Returning if user wants value for Servois' table + if (servois) { + snprintf(out, out_size, "%d",(21 + d) % 31); + return out; + } + + // Finding the next Sunday + // Century-based offset in weekly calculation + int N = (4 + k - q) % 7; + + // Correction for leap days + int b = year % 4; + int c = year % 7; + + // Days from d to next Sunday + int e = (2 * b + 4 * c + 6 * d + N) % 7; + + // Historical corrections for April 26 and 25 + if ((d == 29 && e == 6) || (d == 28 && e == 6 && a > 10)) { + e = -1; + } + + if ((22 + d + e) > 31) { + snprintf(out, out_size, "April %d", d + e - 9); + } else { + snprintf(out, out_size, "March %d", 22 + d + e); + } + + return out; +} + +int main() { + char tmp1[9], tmp2[9]; + + printf("The following are the dates of the Paschal full moon (using " + "Servois notation) and the date of Easter for 2020-2030 AD:\n"); + + printf("Year\tServois number\tEaster\n"); + + for (int year = 2020; year <= 2030; year++) { + printf("%d\t\t%s\t%s\n", year, computus(year, 1, tmp1, 9), + computus(year, 0, tmp2, 9)); + } + + return 0; +} + diff --git a/contents/computus/code/clisp/gauss-easter.lisp b/contents/computus/code/clisp/gauss-easter.lisp new file mode 100644 index 000000000..5ed6ae51a --- /dev/null
+++ b/contents/computus/code/clisp/gauss-easter.lisp @@ -0,0 +1,34 @@ +;;;; Gauss's Easter algorithm implementation + +(defun computus (year &optional (servois nil)) + "Calculates the day of Easter for a given year and optionally its Servois number" + (let* + ((a (mod year 19)) ; year's position on the 19 year metonic cycle + (k (floor year 100)) ; century index + (p (floor (+ 13 (* 8 k)) 25)) ; shift of metonic cycle, add a day offset every 300 years + (q (floor k 4)) ; correction for non-observed leap days + (m (mod (+ 15 (- p) k (- q)) 30)) ; correction to starting point of calculation each century + (d (mod (+ (* 19 a) m) 30)) ; number of days from March 21st until the full moon + (n (mod (+ 4 k (- q)) 7)) ; century-based offset in weekly calculation + (b (mod year 4)) ; correction for leap days + (c (mod year 7)) ; also a correction for leap days + ;; days from d to next Sunday + (e (mod (+ (* 2 b) (* 4 c) (* 6 d) n) 7))) + ;; historical corrections for April 26 and 25 + (when (or (and (eql d 29) (eql e 6)) (and (eql d 28) (eql e 6) (> a 10))) + (setf e -1)) + (values + ;; determination of the correct month for Easter + (if (> (+ 22 d e) 31) + (format nil "April ~a" (+ d e -9)) + (format nil "March ~a" (+ 22 d e))) + ;; optionally return a value for the Servois' table + (if servois (mod (+ 21 d) 31))))) + +(format t "~{~a~%~}" + '("The following are the dates of the Paschal full moon (using Servois" + "notation) and the date of Easter for 2020-2030 AD:~%" + "Year Servois number Easter")) +(loop for year from 2020 to 2030 do + (multiple-value-bind (easter servois) (computus year t) + (format t "~8a~18a~a~%" year servois easter))) diff --git a/contents/computus/code/cpp/gauss_easter.cpp b/contents/computus/code/cpp/gauss_easter.cpp new file mode 100644 index 000000000..0f0c1cf78 --- /dev/null +++ b/contents/computus/code/cpp/gauss_easter.cpp @@ -0,0 +1,60 @@ +#include <iostream> +#include <string> + +std::string computus(int year, bool servois = false) { + // Year's position on the 
19 year metonic cycle + int a = year % 19; + + // Century index + int k = year / 100; + + // Shift of metonic cycle, add a day offset every 300 years + int p = (13 + 8 * k) / 25; + + // Correction for non-observed leap days + int q = k / 4; + + // Correction to starting point of calculation each century + int M = (15 - p + k - q) % 30; + + // Number of days from March 21st until the full moon + int d = (19 * a + M) % 30; + + // Returning if user wants value for Servois' table + if (servois) { + return std::to_string((21 + d) % 31); + } + + // Finding the next Sunday + // Century-based offset in weekly calculation + int N = (4 + k - q) % 7; + + // Correction for leap days + int b = year % 4; + int c = year % 7; + + // Days from d to next Sunday + int e = (2 * b + 4 * c + 6 * d + N) % 7; + + // Historical corrections for April 26 and 25 + if ((d == 29 && e == 6) || (d == 28 && e == 6 && a > 10)) { + e = -1; + } + + // Determination of the correct month for Easter + return 22 + d + e > 31 ? 
"April " + std::to_string(d + e - 9) + : "March " + std::to_string(22 + d + e); +} + +// Here, we will output the date of the Paschal full moon (using Servois +// notation), and Easter for 2020-2030 +int main() { + std::cout << "The following are the dates of the Paschal full moon (using " + "Servois notation) and the date of Easter for 2020-2030 AD:\n" + "Year\tServois number\tEaster\n"; + + for (int year = 2020; year <= 2030; year++) { + std::cout << year << "\t\t" << computus(year, true) << '\t' + << computus(year) << std::endl; + } +} diff --git a/contents/computus/code/crystal/gauss_easter.cr b/contents/computus/code/crystal/gauss_easter.cr new file mode 100644 index 000000000..9f87d573e --- /dev/null +++ b/contents/computus/code/crystal/gauss_easter.cr @@ -0,0 +1,64 @@ +def computus(year, servois = false) + # Year's position on the 19 year metonic cycle + a = year % 19 + + # Century index + k = year // 100 + + # Shift of metonic cycle, add a day offset every 300 years + p = (13 + 8 * k) // 25 + + # Correction for non-observed leap days + q = k // 4 + + # Correction to starting point of calculation each century + m = (15 - p + k - q) % 30 + + # Number of days from March 21st until the full moon + d = (19 * a + m) % 30 + + # Returning if user wants value for Servois' table + if servois + return ((21 + d) % 31).to_s + end + + # Finding the next Sunday + # Century-based offset in weekly calculation + n = (4 + k - q) % 7 + + # Correction for leap days + b = year % 4 + c = year % 7 + + # Days from d to next Sunday + e = (2 * b + 4 * c + 6 * d + n) % 7 + + # Historical corrections for April 26 and 25 + if (d == 29 && e == 6) || (d == 28 && e == 6 && a > 10) + e = -1 + end + + # Determination of the correct month for Easter + if (22 + d + e > 31) + return "April " + (d + e - 9).to_s + else + return "March " + (22 + d + e).to_s + end +end + +# Here, we will output the date of the Paschal full moon +# (using Servois notation), and Easter for 2020-2030 +def main + a = 
(2020..2030).to_a + servois_numbers = a.map { |y| computus(y, servois = true) } + easter_dates = a.map { |y| computus(y) } + + puts "The following are the dates of the Paschal full moon (using Servois " + + "notation) and the date of Easter for 2020-2030 AD:" + puts "Year\tServois number\tEaster" + a.each_index { |i| + puts "#{a[i]}\t#{servois_numbers[i]}\t\t#{easter_dates[i]}" + } +end + +main diff --git a/contents/computus/code/dart/gauss_easter.dart b/contents/computus/code/dart/gauss_easter.dart new file mode 100644 index 000000000..ab48aab20 --- /dev/null +++ b/contents/computus/code/dart/gauss_easter.dart @@ -0,0 +1,63 @@ +String computus(int year, {bool servois = false}) { + // Year's position in metonic cycle + final a = year % 19; + + // Century index + final k = (year / 100).floor(); + + // Shift of metonic cycle, add a day offset every 300 years + final p = ((13 + 8 * k) / 25).floor(); + + // Correction for non-observed leap days + final q = (k / 4).floor(); + + // Correction to starting point of calculation each century + final M = (15 - p + k - q) % 30; + + // Number of days from March 21st until the full moon + final d = (19 * a + M) % 30; + + // Returning if user wants value for Servois' table + if (servois) { + return ((21 + d) % 31).toString(); + } + + // Finding the next Sunday + // Century-based offset in weekly calculation + final N = (4 + k - q) % 7; + + // Correction for leap days + final b = year % 4; + final c = year % 7; + + // Days from d to next Sunday + var e = (2 * b + 4 * c + 6 * d + N) % 7; + + // Historical corrections for April 26 and 25 + if (e == 6) { + if (d == 29 || (d == 28 && a > 10)) { + e = -1; + } + } + + // Determination of the correct month for Easter + if (22 + d + e > 31) { + return 'April ${d + e - 9}'; + } else { + return 'March ${22 + d + e}'; + } +} + +void main() { + print("The following are the dates of the Paschal full moon (using Servois " + + "notation) and the date of Easter for 2020-2030 AD:"); + + 
print("Year\tServois number\tEaster"); + + for (var year = 2020; year <= 2030; year++) { + final servoisNumber = computus(year, servois: true); + final easterDate = computus(year); + + print('$year\t$servoisNumber\t\t$easterDate'); + } +} diff --git a/contents/computus/code/haskell/gauss_easter.hs b/contents/computus/code/haskell/gauss_easter.hs new file mode 100644 index 000000000..a6ab70244 --- /dev/null +++ b/contents/computus/code/haskell/gauss_easter.hs @@ -0,0 +1,55 @@ +data Mode = Servois | Easter + +computus :: Mode -> Int -> String +computus mode year = + case mode of + Servois -> + -- Value for Servois' table + show $ (21 + d) `mod` 31 + Easter -> + -- Determination of the correct month for Easter + if 22 + d + f > 31 + then "April " ++ show (d + f - 9) + else "March " ++ show (22 + d + f) + where + a, b, c, d, e, f, k, m, n, p, q :: Int + -- Year's position on the 19 year metonic cycle + a = year `mod` 19 + -- Century index + k = year `div` 100 + -- Shift of metonic cycle, add a day offset every 300 years + p = (13 + 8 * k) `div` 25 + -- Correction for non-observed leap days + q = k `div` 4 + -- Correction to starting point of calculation each century + m = (15 - p + k - q) `mod` 30 + -- Number of days from March 21st until the full moon + d = (19 * a + m) `mod` 30 + -- Finding the next Sunday + -- Century-based offset in weekly calculation + n = (4 + k - q) `mod` 7 + -- Correction for leap days + b = year `mod` 4 + c = year `mod` 7 + -- Days from d to next Sunday + e = (2 * b + 4 * c + 6 * d + n) `mod` 7 + -- Historical corrections for April 26 and 25 + f = + if (d == 29 && e == 6) || (d == 28 && e == 6 && a > 10) + then -1 + else e + +-- Here, we will output the date of the Paschal full moon +-- (using Servois notation), and Easter for 2020-2030 +main :: IO () +main = do + let years :: [Int] + years = [2020 .. 
2030] + servoisNumbers, easterDates :: [String] + servoisNumbers = map (computus Servois) years + easterDates = map (computus Easter) years + putStrLn "The following are the dates of the Paschal full moon (using Servois notation) and the date of Easter for 2020-2030 AD:" + putStrLn "Year\tServois number\tEaster" + let conc :: Int -> String -> String -> String + conc y s e = show y ++ "\t" ++ s ++ "\t\t" ++ e + mapM_ putStrLn $ zipWith3 conc years servoisNumbers easterDates diff --git a/contents/computus/code/javascript/gauss_easter.js b/contents/computus/code/javascript/gauss_easter.js new file mode 100644 index 000000000..f413644f5 --- /dev/null +++ b/contents/computus/code/javascript/gauss_easter.js @@ -0,0 +1,91 @@ +/** + * In this code, the modulus operator is used. + * However, this operator in javascript/typescript doesn't support negative numbers. + * So, where there may be negative numbers, the function mod is used. + * This function gives the modulo of any relative number a + */ + +/** + * @param {number} a + * @param {number} b + * @returns {number} + */ +function mod(a, b) { + if (a < 0) return mod(a + b, b); + else return a % b; +} + +/** + * @param {number} year + * @param {boolean} [servois=false] + * @returns {string} + */ +function computus(year, servois = false) { + // Year's position in metonic cycle + const a = year % 19; + + // Century index + const k = Math.floor(year / 100); + + // Shift of metonic cycle, add a day offset every 300 years + const p = Math.floor((13 + 8 * k) / 25); + + // Correction for non-observed leap days + const q = Math.floor(k / 4); + + // Correction to starting point of calculation each century + const M = mod(15 - p + k - q, 30); + + // Number of days from March 21st until the full moon + const d = (19 * a + M) % 30; + + // Returning if user wants value for Servois' table + if (servois) { + return ((21 + d) % 31).toString(); + } + + // Finding the next Sunday + // Century-based offset in weekly calculation + const N = 
mod(4 + k - q, 7); + + // Correction for leap days + const b = year % 4; + const c = year % 7; + + // Days from d to next Sunday + let e = (2 * b + 4 * c + 6 * d + N) % 7; + + // Historical corrections for April 26 and 25 + if (e === 6) { + if (d === 29 || (d === 28 && a > 10)) { + e = -1; + } + } + + // Determination of the correct month for Easter + if (22 + d + e > 31) { + return `April ${d + e - 9}`; + } else { + return `March ${22 + d + e}`; + } +} + +console.log( + "The following are the dates of the Paschal full moon (using Servois " + + "notation) and the date of Easter for 2020-2030 AD:" +); + +const values = []; + +for (let year = 2020; year <= 2030; year++) { + const servoisNumber = computus(year, true); + const easterDate = computus(year); + + // Creation of an object to be displayed as a line in the output table + values[year] = { + "servois number": +servoisNumber, + easter: easterDate, + }; +} + +console.table(values); diff --git a/contents/computus/code/julia/gauss_easter.jl b/contents/computus/code/julia/gauss_easter.jl new file mode 100644 index 000000000..676cad4f6 --- /dev/null +++ b/contents/computus/code/julia/gauss_easter.jl @@ -0,0 +1,62 @@ +function computus(year; servois=false) + + # Year's position on the 19 year metonic cycle + a = mod(year, 19) + + # Century index + k = fld(year, 100) + + # Shift of metonic cycle, add a day offset every 300 years + p = fld(13 + 8 * k, 25) + + # Correction for non-observed leap days + q = fld(k, 4) + + # Correction to starting point of calculation each century + M = mod(15 - p + k - q, 30) + + # Number of days from March 21st until the full moon + d = mod(19 * a + M, 30) + + # Returning if user wants value for Servois' table + if servois + return string(mod(21 + d,31)) + end + + # Finding the next Sunday + # Century-based offset in weekly calculation + N = mod(4 + k - q, 7) + + # Correction for leap days + b = mod(year, 4) + c = mod(year, 7) + + # Days from d to next Sunday + e = mod(2 * b + 4 * c + 6 * 
d + N, 7) + + # Historical corrections for April 26 and 25 + if (d == 29 && e == 6) || (d == 28 && e == 6 && a > 10) + e = -1 + end + + # Determination of the correct month for Easter + if(22 + d + e > 31) + return "April " * string(d + e - 9) + else + return "March " * string(22 + d + e) + end +end + +# Here, we will output the date of the Paschal full moon +# (using Servois notation), and Easter for 2020-2030 + +a = collect(2020:2030) +servois_numbers = computus.(a; servois=true) +easter_dates = computus.(a) + +println("The following are the dates of the Paschal full moon (using Servois " * + "notation) and the date of Easter for 2020-2030 AD:") +println("Year\tServois number\tEaster") +for i = 1:length(a) + println("$(a[i])\t$(servois_numbers[i])\t\t$(easter_dates[i])") +end diff --git a/contents/computus/code/nim/gauss_easter.nim b/contents/computus/code/nim/gauss_easter.nim new file mode 100644 index 000000000..f8d1e40de --- /dev/null +++ b/contents/computus/code/nim/gauss_easter.nim @@ -0,0 +1,45 @@ +import strformat + +func computus(year: int, servois: bool = false): string = + let + # Year's position on the 19 year metonic cycle + a = year mod 19 + # Century index + k = year div 100 + # Shift of metonic cycle, add a day offset every 300 years + p = (13 + 8 * k) div 25 + # Correction for non-observed leap days + q = k div 4 + # Correction to starting point of calculation each century + m = (15 - p + k - q) mod 30 + # Number of days from March 21st until the full moon + d = (19 * a + m) mod 30 + # Returning of user wants value for Servois' table + if servois: + return $((21 + d) mod 31) + let + # Find the next Sunday + # Century-based offset in weekly calculation + n = (4 + k - q) mod 7 + # Correction for leap days + b = year mod 4 + c = year mod 7 + # Days from d to next Sunday + temp_e = (2 * b + 4 * c + 6 * d + n) mod 7 + # Historical corrections for April 26 and 25 + e = if (d == 29 and temp_e == 6) or (d == 28 and temp_e == 6 and a > 10): + -1 + else: + 
temp_e + # Determination of the correct month for Easter + if (22 + d + e) > 31: + result = "April {d + e - 9}".fmt + else: + result = "March {22 + d + e}".fmt + +when isMainModule: + echo "The following are the dates of the Paschal full moon (using Servois " + echo "notation) and the date of Easter for 2020-2030 AD:" + echo "Year Servois number Easter" + for year in 2020..2030: + echo "{year} {computus(year, true):14} {computus(year, false):6}".fmt diff --git a/contents/computus/code/powershell/gauss_easter.ps1 b/contents/computus/code/powershell/gauss_easter.ps1 new file mode 100644 index 000000000..be07125a3 --- /dev/null +++ b/contents/computus/code/powershell/gauss_easter.ps1 @@ -0,0 +1,60 @@ +function Calculate-Computus([int]$Year, [switch]$Servois) { + + # Year's position on the 19 year metonic cycle + $a = $Year % 19 + + # Century index + $k = [Math]::Floor($Year / 100) + + # Shift of metonic cycle, add a day offset every 300 years + $p = [Math]::Floor((13 + 8 * $k) / 25) + + # Correction for non-observed leap days + $q = [Math]::Floor($k / 4) + + # Correction to starting point of calculation each century + $M = (15 - $p + $k - $q) % 30 + + # Number of days from March 21st until the full moon + $d = (19 * $a + $M) % 30 + + # Returning if user wants value for Servois' table + if($Servois) { + return ((21 + $d) % 31).ToString() + } + + # Finding the next Sunday + # Century-based offset in weekly calculation + $N = (4 + $k - $q) % 7 + + # Correction for leap days + $b = $Year % 4 + $c = $Year % 7 + + # Days from d to next Sunday + $e = (2 * $b + 4 * $c + 6 * $d + $N) % 7 + + # Historical corrections for April 26 and 25 + if(($d -eq 29 -and $e -eq 6) -or ($d -eq 28 -and $e -eq 6 -and $a -gt 10)) { + $e = -1 + } + + # Determination of the correct month for Easter + if(22 + $d + $e -gt 31) { + return "April " + ($d + $e - 9) + } + else { + return "March " + (22 + $d + $e) + } +} + + +# Here, we will output the date of the Paschal full moon +# (using Servois 
notation), and Easter for 2020-2030 + +Write-Host "The following are the dates of the Paschal full moon (using Servois", + "notation) and the date of Easter for 2020-2030 AD:" +Write-Host "Year`tServois number`tEaster" +foreach($year in 2020..2030) { + Write-Host "$year`t$(Calculate-Computus $year -Servois)`t`t$(Calculate-Computus $year)" +} \ No newline at end of file diff --git a/contents/computus/code/python/gauss_easter.py b/contents/computus/code/python/gauss_easter.py new file mode 100644 index 000000000..97e514276 --- /dev/null +++ b/contents/computus/code/python/gauss_easter.py @@ -0,0 +1,55 @@ +def computus(year, servois=False): + # Year's position on the 19-year metonic cycle + a = year % 19 + + # Century index + k = year // 100 + + # Shift of metonic cycle, add a day offset every 300 years + p = (13 + 8 * k) // 25 + + # Correction for non-observed leap days + q = k // 4 + + # Correction to starting point of calculation each century + M = (15 - p + k - q) % 30 + + # Number of days from March 21st until the full moon + d = (19 * a + M) % 30 + + # Returning if user wants value for Servois' table + if servois: + return str((21 + d) % 31) + + # Finding the next Sunday + # Century-based offset in weekly calculation + N = (4 + k - q) % 7 + + # Correction for leap days + b = year % 4 + c = year % 7 + + # Days from d to next Sunday + e = (2 * b + 4 * c + 6 * d + N) % 7 + + # Historical corrections for April 26 and 25 + if (d == 29 and e == 6) or (d == 28 and e == 6 and a > 10): + e = -1 + + # Determination of the correct month for Easter + if 22 + d + e > 31: + return "April " + str(d + e - 9) + else: + return "March " + str(22 + d + e) + + +# Here, we will output the date of the Paschal full moon +# (using Servois notation), and Easter for 2020-2030 + +print( + "The following are the dates of the Paschal full moon (using Servois", + "notation) and the date of Easter for 2020-2030 AD:", +) +print("Year\tServois number\tEaster") +for year in range(2020, 2031): + 
print(f"{year}\t{computus(year, servois=True)}\t\t{computus(year)}") diff --git a/contents/computus/code/rust/gauss_easter.rs b/contents/computus/code/rust/gauss_easter.rs new file mode 100644 index 000000000..3ada88184 --- /dev/null +++ b/contents/computus/code/rust/gauss_easter.rs @@ -0,0 +1,69 @@ +fn computus(year: usize, servois: bool) -> String { + // Year's position on the 19 year metonic cycle + let a = year % 19; + + // Century index + let k = year / 100; // NOTE: dividing integers always truncates the result + + // Shift of metonic cycle, add a day offset every 300 years + let p = (13 + 8 * k) / 25; + + // Correction for non-observed leap days + let q = k / 4; + + // Correction to starting point of calculation each century + let m = (15 - p + k - q) % 30; + + // Number of days from March 21st until the full moon + let d = (19 * a + m) % 30; + + if servois { + return ((21 + d) % 31).to_string(); + } + + // Finding the next Sunday + // Century-based offset in weekly calculation + let n = (4 + k - q) % 7; + + // Correction for leap days + let b = year % 4; + let c = year % 7; + + // Days from d to next Sunday + let temp_e = ((2 * b + 4 * c + 6 * d + n) % 7) as isize; + + // Historical corrections for April 26 and 25 + let e = if (d == 29 && temp_e == 6) || (d == 28 && temp_e == 6 && a > 10) { + -1 + } else { + temp_e + }; + + // Determination of the correct month for Easter + if (22 + d) as isize + e > 31 { + format!("April {}", d as isize + e - 9) + } else { + format!("March {}", 22 + d as isize + e) + } +} + +fn main() { + // Here, we will output the date of the Paschal full moon + // (using Servois notation), and Easter for 2020-2030 + + let years = 2020..=2030; + + println!( + "The following are the dates of the Paschal full moon (using \ + Servois notation) and the date of Easter for 2020-2030 AD:" + ); + println!("Year\tServois number\tEaster"); + years.for_each(|year| { + println!( + "{}\t{:<14}\t{}", + year, + computus(year, true), + computus(year, 
false), + ) + }); +} diff --git a/contents/computus/code/scala/gauss_easter.scala b/contents/computus/code/scala/gauss_easter.scala new file mode 100644 index 000000000..01f5a4c30 --- /dev/null +++ b/contents/computus/code/scala/gauss_easter.scala @@ -0,0 +1,58 @@ +object GaussEaster { + def computus(year : Int, servois: Boolean = false): String = { + + // Year's position on the 19 year metonic cycle + val a = year % 19 + + // Century index + val k = (year / 100).toInt + + // Shift of metonic cycle, add a day offset every 300 years + val p = ((13 + 8 * k) / 25).toInt + + // Correction for non-observed leap days + val q = (k / 4).toInt + + // Correction to starting point of calculation each century + val M = (15 - p + k - q) % 30 + + // Number of days from March 21st until the full moon + val d = (19 * a + M) % 30 + + // Returning if user wants value for Servois' table + if (servois) + return s"${(21 + d) % 31}" + + // Finding the next Sunday + // Century-based offset in weekly calculation + val N = (4 + k - q) % 7 + + // Correction for leap days + val b = year % 4 + val c = year % 7 + + // Days from d to next Sunday + var e = (2 * b + 4 * c + 6 * d + N) % 7 + + // Historical corrections for April 26 and 25 + if ((d == 29 && e == 6) || (d == 28 && e == 6 && a > 10)) { + e = -1 + } + + // Determination of the correct month for Easter + if (22 + d + e > 31) + s"April ${d + e - 9}" + else + s"March ${22 + d + e}" + } + + def main(args: Array[String]): Unit = { + println("The following are the dates of the Paschal full moon (using " + + "Servois notation) and the date of Easter for 2020-2030 AD:\n" + + "Year\tServois number\tEaster\n") + + for( year <- 2020 to 2030){ + println(s"$year \t\t ${computus(year, true)} \t${computus(year)}") + } + } +} \ No newline at end of file diff --git a/contents/computus/code/typescript/gauss_easter.ts b/contents/computus/code/typescript/gauss_easter.ts new file mode 100644 index 000000000..789b9bd7e --- /dev/null +++ 
b/contents/computus/code/typescript/gauss_easter.ts @@ -0,0 +1,91 @@ +/** + * In this code, the modulus operator is used. + * However, this operator in javascript/typescript doesn't support negative numbers. + * So, where there may be negative numbers, the function mod is used. + * This function gives the modulo of any relative number a + */ + +function mod(a: number, b: number): number { + if (a < 0) { + return mod(a + b, b); + } else { + return a % b; + } +} +function computus(year: number, servois: boolean = false): string { + // Year's position in metonic cycle + const a: number = year % 19; + + // Century index + const k: number = Math.floor(year / 100); + + // Shift of metonic cycle, add a day offset every 300 years + const p: number = Math.floor((13 + 8 * k) / 25); + + // Correction for non-observed leap days + const q: number = Math.floor(k / 4); + + // Correction to starting point of calculation each century + const M: number = mod(15 - p + k - q, 30); + + // Number of days from March 21st until the full moon + const d: number = (19 * a + M) % 30; + + // Returning if user wants value for Servois' table + if (servois) { + return ((21 + d) % 31).toString(); + } + + // Finding the next Sunday + // Century-based offset in weekly calculation + const N: number = mod(4 + k - q, 7); + + // Correction for leap days + const b: number = year % 4; + const c: number = year % 7; + + // Days from d to next Sunday + let e: number = (2 * b + 4 * c + 6 * d + N) % 7; + + // Historical corrections for April 26 and 25 + if (e === 6) { + if (d === 29 || (d === 28 && a > 10)) { + e = -1; + } + } + + // Determination of the correct month for Easter + if (22 + d + e > 31) { + return `April ${d + e - 9}`; + } else { + return `March ${22 + d + e}`; + } +} + +console.log( + "The following are the dates of the Paschal full moon (using Servois " + + "notation) and the date of Easter for 2020-2030 AD:" +); + +// Type of a line in the output table +interface IOutputLine { + "servois 
number": number; + easter: string; +} + +const values: IOutputLine[] = []; + +for (let year = 2020; year <= 2030; year++) { + const servoisNumber: string = computus(year, true); + const easterDate: string = computus(year); + + // Creation of an object to be displayed as a line in the output table + const line: IOutputLine = { + "servois number": +servoisNumber, + easter: easterDate, + }; + + values[year] = line; +} + +console.table(values); diff --git a/contents/computus/computus.md b/contents/computus/computus.md new file mode 100644 index 000000000..1eeec678d --- /dev/null +++ b/contents/computus/computus.md @@ -0,0 +1,360 @@ +# The Easter Algorithm (Computus) + +Though the word *Computus* can technically describe any sort of computation {{ "bede725" | cite }} or else a set of medieval tables for calculating various astrological events {{ "dictcomputus" | cite }}, it is also one of the most common historical names for the calculation of the Christian holiday of Easter every year. +Nominally, Easter happens the Sunday after the first full moon after the spring equinox (roughly March 21st). +This particular full moon is known by a number of names, such as the Pink (Strawberry) Moon, Hunter's Moon, or the Snow Moon, along with several others. +The most common name for it is the paschal full moon, which translates to "Passover" in Greek and signifies an important Jewish festival. + +For the first few centuries, the date of Easter each year was dictated by the Pope; however, after the church grew, it was no longer straightforward to communicate this date to all of Christendom. +As such, the church did what it could to algorithmically generate tables for clergy to determine the date of Easter each year. +To this day, the calculation of Easter still poses a problem, with western and eastern (orthodox) churches celebrating on different dates approximately 50% of the time. 
+ +I'll be honest, there is a lot of good, Christian drama surrounding the calculation of this event and it's remarkably interesting to read about {{ "bien2004" | cite }}. +Suffice it to say that the date of Easter bamboozled many historical scholars, with at least one algorithm appearing in the early archives of the now famous scientific journal of *Nature* {{ "computus1876" | cite }}. +The calculation was so complicated that even Frederick Gauss had to try his hand at it (and failed before being corrected by one of his students). + +Essentially, the date of Easter depends on both the lunar and solar cycles. +The date of the paschal full moon, for example, is static in the lunar calendar, but it is not in the solar calendar. +In this way, computus is the act of mapping a lunar cycle onto the Gregorian (solar) calendar everyone knows and loves. +Because many different calendar systems have existed throughout history, there was a natural question as to *which* calendar system would be used to calculate the precise date of Easter. +The western churches chose the Gregorian calendar and the eastern churches chose the Julian one, and this is one reason why western and eastern churches sometimes celebrate on different dates. +That said, the Gregorian calendar more accurately represents the true date of the paschal full moon, so the western church's approach ended up being more precise. + +Though there are many methods to calculate Easter, for now, we will focus only on Gauss's algorithm; however, we may certainly come back (in subsequent years) to incorporate other Easter algorithms if there is demand. +These algorithms are some of my favorite gems in the history of algorithm design because of all the drama surrounding the calculation of something that seems trivial! +After all, how hard could it be to calculate Easter? 
+ +## Gauss's Easter algorithm history + +Gauss is known for a lot of things: Gaussian elimination, the Cooley-Tukey method before Cooley or Tukey even existed, Gauss's Law for electromagnetism, etc. +One thing he is *not* particularly well known for is an algorithm he devised in 1800, which was later corrected by his student Peter Paul Tittle in 1816. +In fact, there were a series of publications from Gauss in this era all relating to the precise date of Easter. +The legend goes that Gauss actually did not know his real birthday in the Gregorian calendar and used this same algorithm to determine it. +Apparently, his mother only told him that he was born on a Wednesday 8 days before Ascension Day in 1777, which corresponds to April 30th {{ "bien2004" | cite }}. + +Honestly, Gauss's Easter algorithm was the 19th century equivalent of undocumented code. +I could imagine Gauss grumpily "patching" his method when users complained that it did not work on dates past 4200 or even certain dates within his own era! +When some of his compatriots (such as Johann Lambert and Jean Joseph Delambre) expressed their concern over the method's performance, Gauss replied by saying, + +> The investigation by which the formula [...] is found is based on higher arithmetic, for which I presumably cannot refer to any publication. + +Which was the 19th century equivalent of saying, "you are too dumb to understand my genius." +I have definitely met a few fledgling programmers who feel the same, but none of them were anywhere near as prolific as Gauss. + +One of the most important fans of Gauss's work was Servois, who created a calendar based on Gauss's 1800 publication, shown below: + +Servois' 1800 table + +This calendar shows the date the paschal full moon, indicating that Easter will be the following Sunday {{ "servois" | cite }}. 
+In this table, a value greater than 22 indicates the full moon will be on the presented number (date) in March and a value less than 22 indicates the full moon will be on that date in April. +The $$y$$-axis of this table indicates the decade and the $$x$$-axis indicates the precise year. +Admittedly, the notation is a bit funky, but it was 1813. +Times were different then. + +The task for this chapter will be to explain (to the best of my abilities) how one would go about using Gauss's Easter algorithm to calculate the date of Easter for any given year (within the limitations of the algorithm). + +## Background + +Because Easter is the Sunday following the paschal full moon, which is the first full moon of spring, Gauss's algorithm is tasked with finding a way to map the lunar calendar to the Gregorian (solar) calendar. +For this reason, before discussing the algorithm, itself, we must first introduce both calendar systems. +The Gregorian (solar) calendar has been created to mark Earth's full revolution around the Sun, which is approximately 365.2425 days. +Unfortunately, days are based on the Earth's rotation about its axis, not its revolution around the Sun, so the number of days in a year is not an integer number (such as 365). +This discrepancy has actually led to a large number of calendar systems, including one invented by Gauss, himself {{ "standish2004" | cite }}. +Before the Gregorian calendar, there was another correction made from an old Roman calendar to set the days in a year to be 365.25 days. +This was called the Julian calendar. +From there, the Julian calendar was further corrected to the Gregorian calendar with 365.2425 days. +Though there is only a small change necessary to use Gauss's Easter algorithm for the Julian calendar, this will not be covered here; however, if you want to see this, we can add it in upon request. 
+ +To account for the non-integer nature of the Gregorian year, a leap day is celebrated on February 29th every 4 years, with exception of when the year is a multiple of 100, where no leap-day is observed; if the year is divisible by 400, however, a leap day is still observed. +This means that every 400 years, there are 97 leap days. +This is why a leap day was celebrated in 2020 and 2000, but was not in 1900. +If at this point, you feel like your favorite calendar system is held together by duct tape and string, you would be right. + +In addition to the solar year, Gauss's Easter algorithm also needs to keep the lunar year into account. +A lunar month corresponds to the time it takes the Moon to complete one full revolution around the Earth. +In most cases, this is approximately 27.5 days {{ "lunar_month_wiki" | cite }}. +That said, space is complicated and the Moon is not the only revolving body. +Lunar phases are related to the time it takes for the Moon to return to its location *in relation to* the line connecting the Sun and Earth, as shown below: + +Synodic half year + +This is called the synodic month and will be the approximation used for this chapter. +Below, we also show a snapshot of this simulation after 6 synodic months: + +Synodic half year + +Here, we show an outline of the Earth and Moon in an arbitrary initial position, each with an angle of $$-\frac{\pi}{4}$$ from the horizontal axis. +In addition, we show the location of the Moon and Earth again after 6 synodic months and additional outlines for each intermediate synodic month. +Red lines are drawn from the center of the sun to the moon to indicate the positioning of the moon in relation to the sun and earth. +In all positions, the Moon is hidden behind the Earth, creating the full moon phase. 
+In this way, the synodic month is the time between two consecutive phases, which is slightly longer than the time it takes to revolve around the Earth and return to the same angle (here $$\frac{\pi}{4}$$). +Each synodic month is approximately 29.5 days, so a synodic year of 12 lunar months is 354 days, which is 11 days shorter than the normal 365 days in a Gregorian year. +The following is a pictorial representation of offset between a solar and lunar year: + +Full year + +Here, we see the Sun at the center, with the Earth and Moon starting the year at an angle of $$-\frac{\pi}{4}$$ from the horizontal axis. +The initial location of the Earth and Moon are shown as an outline with the letter "A" at their center. +After a full synodic lunar year (12 lunar months), another outline of the Earth and Moon are shown at position B, and after a full Gregorian year, they are shown in position C. +An arc is then drawn showing the difference of 11 days between the Earth's position after a synodic year, and another arc is drawn to show the difference between the Moon's position after a full Gregorian year. + +Because the synodic month and the solar year are not synchronized, the phase of the Moon will be different on the same day of the Gregorian year. +That said, the lunar and solar calendars will re-synchronize roughly every 19 years. +For example, if there is a new moon on January 1st, 2020, there will not be a new moon on January 1st, 2021; however, there *will* be a new moon on January 1st, 2039. +This 19-year cycle where the Moon and Sun are waiting to re-synchronize is known as the Metonic cycle and has been studied for centuries. + +This cycle allows us to somewhat easily transition between solar and lunar calendars. 
+If we imagine any Gregorian date (let's say January 1st again for clarity), the moon could be in one of 19 different phases, as shown below: + +Metonic cycle + +Here, we show each possible phase of the moon as an outline, but the actual phase as a grey circle. +Essentially, by knowing what year we are on in the Metonic cycle, we can single out which phase of the moon we will see on any given date. +This is powerful and will allow us to find the next full moon by looking ahead a few days. + +As a final note, there is a small offset in the Metonic cycle of 1 hour and 45 minutes every 19 years, so in 2500 years, it will be 8 days off, but that's a problem for people in 2500. +For now, we should be able to start discussing the algorithm, itself. + +## The algorithm + +As alluded to in Gauss's quote above, the Easter algorithm is closer to a set of formulas than a method used to compute anything on a modern computer. +This is partially because of bad software engineering by Gauss and partially because computers did not really exist at that point. +Considering this method was literally called *Computus*, there probably was not much to compute at all at the time. +Nowadays, you could more easily find the date of Easter with loops and conditions, but this is the *Arcane* Algorithm Archive, and this is definitely an arcane algorithm, so let's go! + +For this section, we will be following similar notation to Gauss's original 1800 work, which is a bit terse and hard to follow; however, each term is significantly meaningful. +If you are reading this and think you have a better way to present anything, please let us know (with an issue or pull request on github) and we can correct the text! + +This method can be split into 2 parts: +1. Calculating the days from March 21st to the next full moon +2. Calculating the days from the full moon to the next Sunday + +In the following sections, we will discuss both individually. 
+ +### Calculating the date of the next full moon +To start, we will be calculating $$d$$, which is the number of days until the next full moon from March 21st (the first day of spring according to the pope). +To calculate this, we need to first calculate a number of auxiliary variables, starting with the current year's location on the Metonic calendar, +$$ +a = \text{year}~\%~19, +$$ +where $$\%$$ is the modulo operator and 19 is the length of the Metonic calendar in years. +From here, we also need to calculate an offset to $$a$$, and for this we need the century index, +$$ +k = \left\lfloor\frac{\text{year}}{100}\right\rfloor, +$$ +where $$\lfloor\cdot\rfloor$$ is the flooring operation of rounding the value down to the nearest integer. +With this, we can calculate the shift in the Metonic cycle to be, + +$$ +p = \left\lfloor\frac{13+8k}{25}\right\rfloor. +$$ + +This expression represents the fact that the Metonic cycle will be 8 days off every 2500 years and adds an additional offset of 13 to ensure the Metonic cycle aligns with empirical observation. + +At this point, we know what year we are at on the Metonic calendar and have calculated an offset accordingly; however, we have yet to take into account leap years. +As stated above, there are 97 leap days every 400 years, and the calculation of $$p$$ above requires correction for the 3 leap days missed. +If one adds 25 leap days per century and subtracts $$k$$, they will find 96 leap days every 400 years instead, which is a close approximation, but off by one. +This accounts for the fact that leap days are not celebrated on years that are multiples of 100. +There is, however, an exception made for years that are multiples of 400, which is why Gauss calculated an additional variable, + +$$ +q = \left\lfloor\frac{k}{4}\right\rfloor. +$$ + +This means that $$100-k-q$$ will provide the appropriate number of leap days every 400 years. +After this is found, we then calculate a sum of all offsets within a lunar month. 
+ +$$ +M = (15-p+k-q)~\%~30 +$$ + +where 15 is an offset indicating that the full moon on year 0 is 15 days from March 21st, $$p$$ is the number of days we are off from the Metonic cycle, and $$k-q$$ are non-observed leap days. +The values of $$p$$, $$k$$, and $$q$$ all provide *century* offsets, which means that the value of $$M$$ will provide the correct starting point for each century. +The $$\%30$$ (modulo 30 arithmetic) constricts our calculation to be within a single synodic lunar month of approximately 30 days. + +With all this information, we can finally calculate the number of days from March 21st until the first full moon, as + +$$ +d = (19a+M)~\%~30 +$$ + +Again, the $$\%~30$$ operation makes sense here because there is no way the next full moon could occur over 30 days (a synodic lunar month) from March 21st. +At first glance, this is simply a calculation of $$a$$ (where we are on the Metonic cycle) with some offset, $$M$$. +This is true, but there is an additional multiplicative factor of 19. +One might be tempted to wave this away by saying, "19 is the number of years in the Metonic cycle, so this makes sense!" +The truth is that that 19 is a bit more complicated. +This calculation is a calculation of *days*, not years. + +Every 12 lunar months is roughly 354 days, which is 11 days shorter than 365. +This means that every year in the Metonic cycle, the lunar phase will be 11 days behind. +It just so happens that $$-11~\%~30 = 19$$. +Thus, $$19a$$ is a combination of this 11 day offset and the fact that we are using modulo 30 arithmetic. +After 19 years, the lunar calendar will be a full 365 days off in the solar calendar, but again, we only care about *day* offsets in this calculation. +No one really keeps track of lunar years, just solar ones. + +Regardless, we now have $$d$$, the number of days until the next full moon. +Interestingly, this is all the information necessary to replicate Servois's table above. 
+From here, we simply need to create a two-dimensional array with the decade on the $$y$$ axis and year on the $$x$$ axis and set within it the value of $$(21+d)~\%~31$$, where the 21 represents the 21st of March, and the $$\%~31$$ comes from the fact that there are 31 days in March. +For example, if we were to do this computation for the years from 2000 to 2099, we would find the following table: + +Servois' 2000 table + +Which shows that the date of the paschal full moon for 2020 is April 9th. +Now we can move on to finding the precise date of Easter, which should be the following Sunday + +### Calculating the next Sunday +This calculation will take a few variables from the previous section, namely $$k-q$$ (the number of non-observed leap days), and $$d$$ (the number of days since March 21st to the next full moon). +For the last calculation, we synchronized the number of days in a lunar month with the Gregorian (solar) calendar. +For this computation, we do similar operations, but for the weekly calendar of 7 days, this value will be stored in $$e$$. +The first step is calculating the correct offset each century based on the fact that Jan 1st, in year 1 was a Friday and then accounting for all the non-observed leap days ($$k-q$$), + +$$ +N = (4+k-q)~\%~7. +$$ + +From here, things get a little tricky. +There are 52 weeks in a year, but $$52\times7=364$$, meaning we are essentially one day off every year, with exception of leap years where we are two days off. +As an example, look at the following table + +| January 1st | Day of the week | Special considerations | +| ----------- | --------------- | ---------------------- | +| 2017 | Sunday | None | +| 2018 | Monday | None | +| 2019 | Tuesday | None | +| 2020 | Wednesday | Leap Year | +| 2021 | Friday | None | + +Simply put, every year we should subtract one day of the week, but on leap years, we should subtract 2. 
+To keep tabs on this, we need two separate counts, + +$$ +b = \text{year}~\%~4, +$$ +and +$$ +c = \text{year}~\%~7, +$$ +where $$b$$ keeps track of leap years, and $$c$$ simply increments by 1 every year. +Through a bit of mathematical magic, we can find the expression $$2b+4c$$, which will be -1 in modulo 7 arithmetic for every year, except leap years where it will be -2. + +With all these terms put together, we can finally calculate the offset from the full moon to Easter Sunday as + +$$ +e = (2b+4c+6d+N)~\%~7. +$$ + +Here, all terms are described as above and the multiplicative factor of 6 to $$d$$ will provide an offset to Sunday without correcting for leap days. + +### Wrapping up + +At this point, we can calculate the days from March 21st to Easter Sunday to be $$d+e$$. +In particular + +$$ +\text{Easter} = \left\{ + \begin{align} + d+e+22\text{ March}& \qquad \text{if } 22+d+e\leq31 \\ + d+e-9\text{ April}& \qquad \text{if } 22+d+e>31 \\ + \end{align} +\right. +$$ + +Remember that March 22nd would be the first possible day to celebrate Easter because March 21st would be the first possible full moon of spring. +All said, there are a few exceptions that are somewhat tricky to understand, namely: + +$$ +e = \left\{ + \begin{align} + &e \\ + &-1, \qquad \text{if } d=29 \text{ and } e=6 \text{ or } d=28, e=6, \text{ and } a>10 + \end{align} +\right. +$$ + +These conditionals are placed on the output of $$d$$ and correspond to when Easter falls on April 26th (if $$d = 29$$) or April 25th (if $$d = 28$$). +In both of these cases, we are setting $$e=-1$$, which has the effect of removing a week from the date of Easter. +For example, an Easter that would be celebrated on the 26th would instead be celebrated on the 19th. + +Many say that these conditionals are placed on the output for historical reasons, but between you and me, I feel there is a more mathematical reason that I do not fully understand. 
+After all, why is the correction for $$d=28$$ only placed on the Easter date output on the second half of the Metonic cycle (if $$a > 10$$)? +If you think you might have a better idea as to why these dates are corrected as such, please let us know! + +As mentioned, this particular algorithm does not make use of any standard computational techniques. +There are no loops, conditionals, stacks, or queues. +However, there can be no doubt that Gauss was a master of his craft. +The sheer complexity of this calculation both baffles and astounds me -- especially because this was done hundreds of years before computational thinking became common-place. + +Sure, this can be done straightforwardly with a calculator, but it is no doubt an algorithm worth discussing and celebrating for its ingenuity at the time of creation. + +## Video Explanation + +Here is a video describing key elements of Gauss's Easter Algorithm: + +
+ +
+ +## Example Code +Unlike many other chapters in the Algorithm Archive, this particular method can be described almost entirely by mathematical expressions. +As such, it should be relatively straightforward to implement in a number of different languages, and I heartily encourage you to do so! +For now, we have the code outputting a tuple of $$d$$ and $$e$$, so users can use this to calculate either the date of Easter or Servois's table, depending on their use-case; however, please modify the code however you wish! + +{% method %} +{% sample lang="jl" %} +[import, lang:"julia"](code/julia/gauss_easter.jl) +{% sample lang="hs" %} +[import, lang:"haskell"](code/haskell/gauss_easter.hs) +{% sample lang="py" %} +[import, lang:"python"](code/python/gauss_easter.py) +{% sample lang="crystal" %} +[import, lang:"crystal"](code/crystal/gauss_easter.cr) +{% sample lang="rust" %} +[import, lang:"rust"](code/rust/gauss_easter.rs) +{% sample lang="ps1" %} +[import, lang:"powershell"](code/powershell/gauss_easter.ps1) +{% sample lang="c" %} +[import, lang:"c"](code/c/gauss_easter.c) +{% sample lang="cpp" %} +[import, lang:"cpp"](code/cpp/gauss_easter.cpp) +{% sample lang="lisp" %} +[import, lang:"lisp"](code/clisp/gauss-easter.lisp) +{% sample lang="nim" %} +[import, lang:"nim"](code/nim/gauss_easter.nim) +{% sample lang="scala" %} +[import, lang:"scala"](code/scala/gauss_easter.scala) +{% sample lang="dart" %} +[import, lang:"dart"](code/dart/gauss_easter.dart) +{% sample lang="javascript" %} +[import, lang:"javascript"](code/javascript/gauss_easter.js) +{% sample lang="typescript" %} +[import, lang:"typescript"](code/typescript/gauss_easter.ts) +{% endmethod %} + + +### Bibliography + +{% references %} {% endreferences %} + + + +## License + +##### Code Examples + +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). 
+ +##### Text + +The text of this chapter was written by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +[

](https://creativecommons.org/licenses/by-sa/4.0/) + +##### Images/Graphics +- The image "[Servois 1800 Colored Table](res/servois_1800.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Relative Orbits](res/orbit.svg)" was created by [Xadisten](https://github.com/lockcmpxchg8beax) and was provided during a discussion on Twitch. It is licensed under the [Creative Commons Attribution 4.0 International License](https://creativecommons.org/licenses/by/4.0/legalcode). +- The image "[Synodic Half Year](res/synodic_half_year.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Metonic shadows](res/metonic.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Full Year Orbit](res/orbit.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Servois 2000 Colored Table](res/servois_2000.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). 
diff --git a/contents/computus/res/metonic.png b/contents/computus/res/metonic.png new file mode 100644 index 000000000..239d77383 Binary files /dev/null and b/contents/computus/res/metonic.png differ diff --git a/contents/computus/res/orbit.png b/contents/computus/res/orbit.png new file mode 100644 index 000000000..7f61d0012 Binary files /dev/null and b/contents/computus/res/orbit.png differ diff --git a/contents/computus/res/orbit.svg b/contents/computus/res/orbit.svg new file mode 100644 index 000000000..0ffdca8fc --- /dev/null +++ b/contents/computus/res/orbit.svg @@ -0,0 +1,86 @@ + + + + + + + + + + + + + + + + + + + diff --git a/contents/computus/res/servois_1800.png b/contents/computus/res/servois_1800.png new file mode 100644 index 000000000..a523f311d Binary files /dev/null and b/contents/computus/res/servois_1800.png differ diff --git a/contents/computus/res/servois_2000.png b/contents/computus/res/servois_2000.png new file mode 100644 index 000000000..eead19862 Binary files /dev/null and b/contents/computus/res/servois_2000.png differ diff --git a/contents/computus/res/synodic_half_year.png b/contents/computus/res/synodic_half_year.png new file mode 100644 index 000000000..54a7ff3d2 Binary files /dev/null and b/contents/computus/res/synodic_half_year.png differ diff --git a/contents/convolutions/1d/1d.md b/contents/convolutions/1d/1d.md new file mode 100644 index 000000000..ecf56a1df --- /dev/null +++ b/contents/convolutions/1d/1d.md @@ -0,0 +1,335 @@ +# Convolutions in 1D +As mentioned in the [introductory section for convolutions](../convolutions.md), convolutions allow mathematicians to "blend" two seemingly unrelated functions; however, this definition is not very rigorous, so it might be better to think of a convolution as a method to apply a filter to a signal or image. +This, of course, brings up more questions: what is a filter? What is a signal? How is this all related to images? + +For this, we will start with some predefined signal. 
+It does not matter too much what it is, so we will pick a square wave where everything is set to zero except for a few elements at the center, which will be set to one. +This signal can be treated as an array, or a black and white, one-dimensional image where everything is black except for a white strip at the center. +We will also introduce a filter, which will be a simple triangle wave that goes to 1. +Both of these are shown below: + +

+ + +

+ +So now we have a signal and a filter. +How do we apply the filter to the signal? +The easiest way to do this is to iterate through every point in the signal and blend it with neighboring elements, where each neighboring element is weighted based on the filter value. +So in the case where the triangle wave is only 3 elements (`[0.5, 1, 0.5]`), the output at each point would be + +$$ +C_n = \frac{A_{n-1}}{2} + A_{n} + \frac{A_{n+1}}{2}, +$$ + +where $$C$$ is the output value, $$A$$ is the input array (a signal or image), and $$n$$ is an iterable element through that signal. +In this way, the "application of a filter," is simply a multiplication of the triangle wave centered around each point of the input array, followed by in integral or sum of the output. +In some sense, this means we will shift the filter, then multiply and sum every step. +This can be seen in the following animation: + +
+ +
+ +Here, the purple, dashed line is the output convolution $$C$$, the vertical line is the iteration $$n$$, the blue line is the original signal, the red line is the filter, and the green area is the signal multiplied by the filter at that location. +The convolution at each point is the integral (sum) of the green area for each point. + +If we extend this concept into the entirety of discrete space, it might look like this: + +$$(f*g)[n] = \sum_{m = -\infty}^{\infty}f[m]g[n-m] = \sum_{m = -\infty}^{\infty}f[n-m]g[m]$$ + +Where `f[n]` and `g[n]` are arrays of some form. +This means that the convolution can calculated by shifting either the filter along the signal or the signal along the filter. +This can be read as we said before: every step, we shift the filter, multiply, and sum. +There is, of course, a small caveat here. +Why are we subtracting $$m$$? +Certainly, if we wanted to "shift the filter along the signal," we could also do so by *adding* $$m$$ instead, but that is actually an entirely separate operation known as a *correlation*, which will be discussed at a later time. + +The simplest interpretation for this equation is the same as the animation: we reverse the second array, and move it through the first array one step at a time, performing a simple element-wise multiplication and summation at each step. +With this in mind, we can almost directly transcribe the discrete equation into code like so: + +{% method %} +{% sample lang="jl" %} +[import:27-46, lang:"julia"](code/julia/1d_convolution.jl) +{% sample lang="cs" %} +[import:63-84, lang:"csharp"](code/csharp/1DConvolution.cs) +{% sample lang="py" %} +[import:20-31, lang:"python"](code/python/1d_convolution.py) +{% endmethod %} + +The easiest way to reason about this code is to read it as you might read a textbook. +For each element in the output domain, we are summing a certain subsets of elements from `i-length(filter)` to `i` after multiplying it by the reversed filter (`filter[i-j]`). 
+In this way, it is precisely the same as the mathematical notation mentioned before. + +In contrast to the animation, where the filter continuously reappears on the left edge of the screen, the code we have written for this part of the chapter requires the user to specify what they expect the output array length to be. +Determining what should happen at the edges of the convolution is a somewhat hotly debated topic and differs depending on what the user actually wants, so we will be discussing this in greater detail later in this chapter. + +As an important note, if we were to extend the convolution into continuous space, we might write something like: + +$$(f*g)(x) = \int_{-\infty}^{\infty}f(\xi)g(x-\xi)d\xi = \int_{-\infty}^{\infty}f(x-\xi)g(\xi)d\xi$$ + +Note that in this case, $$x$$ and $$\xi$$ are not necessarily spatial elements, but the interpretation is otherwise the same as before. + +At this stage, the mathematics and code might still be a little opaque, so it is a good idea to play around a bit and think about how this operation might be used in practice with a few different filters. + +## Playing with filters + +Honestly, the best way to learn how convolutions work is by using them for a number of different signals and filters, so +let us extend the previous triangle filter a bit further by convolving a square wave with a relatively sharp Gaussian, which can be seen in the following animation: + +
+ +
+ +In practice, the convolutional output here is very similar to the triangle wave we showed before. +The final convolved image looks a lot like the square, except that its boundaries have been smoothed out or "blurred." +In practice whenever a Gaussian filter is used, it will always blur the other convolved signal, which is why a convolution with a Gaussian is also called a *blurring operation*. +This operation is used very often when dealing with two-dimensional images, and we will discuss common kernels found in the wild in [the next section](../2d/2d.md). +Still, it is interesting to see the blurring operation in action by convolving a random distribution with a larger Gaussian filter: + +
+ +
+ +In this animation, the final convolution is so blurred that it does not seem related to the random input signal at all! +In fact, this animation seems to blend much more when compared to the previous Gaussian and the triangle wave animations. +This is because the Gaussian is wider than the previous to filters. +In general, the wider the filter, the stronger the blurring effect. + +So what happens if we convolve a Gaussian with another Gaussian? +Well, that is shown below: + +
+ +
+ +As one might expect, the output is a blurrier Gaussian, which is essentially just wider. +If you were paying particularly close attention to the visualization, you might have noticed that the green area inside this visualization does not properly line up with the overlap of the two arrays. +Don't worry! +This is exactly what should happen! +Remember that the convolution requires a *multiplication* of the signal and filter, which was the same as the overlap when the signal was a square wave; however, in the case of two distinct signals, we should expect the multiplied output to look somewhat distinct. + +Let us extend this concept to one final example of a square wave convolved with a triangular, sawtooth function that looks like this: + +

+ +

+ +This is the first non-symmetric filter of this chapter, and its convolution would look like this: + +
+ +
+ +Non-symmetric filters are useful for testing convolutions to ensure that the output is correct, so it might be worthwhile to linger on this animation for a bit longer. +Notice how the convolution has an accelerating, positive slope when the reversed sawtooth function interacts with the square. +This makes sense as the smallest part of the triangle interacts first. +Similarly, there is a negatively accelerating slope when the sawtooth function leaves the square. + +## Dealing with boundaries + +In all of the animations, we have shown the filter constantly reappearing on the left edge of the screen, which is not always the best thing to do at the boundaries. +In fact, these boundary conditions are somewhat non-trivial to code, so for this section, we will start with relatively simple boundary conditions that were introduced in the previous code example. + +### Simple boundaries + +In general, if a user wants to see a full convolution between two signals, the output size must be the size of the two signals put together, otherwise, we cannot iterate through the entire convolutional output domain. +For example, here is random noise again convolved with a Gaussian function, but with non-periodic boundaries: + +
+ +
+ +This shows the full, unbounded convolution of the two signals, where +we clearly see a "ramp up" and "ramp down" phase at the start and end of the animation. +That said, there are many applications where the user actually needs to specify the output domain to be another length, such as the size of one of the input signals. + +In this case, the simplest boundary would be to assume that whenever the filter hits the end of the image, it simply disappears. +Another way to think about this is that the signal only exists for the domain we specify it over, and is all 0s outside of this domain; therefore, the filter does not sum any signal from elements beyond its scope. +As an example, let's take the same example as before: + +
+ +
+ +Similar to the case without boundary conditions, this convolution needs to "ramp up," but it does not need to "ramp down." +This is because the convolution output no longer extends past the bounds of the original signal so the bounded convolution is a subset of the full convolution. +More than that, the convolution does not go all the way to 0 on the right side. +This means that we are actually ignoring a rather important part of the convolution! + +This is 100% true; however, if the signal is large and the filter is small (as is the case with most of image processing), we do not really care that much about the bits of the convolution we missed. +In addition, there is a way to center the convolution by modifying the location where the filter starts. +For example, we could have half of the filter already existing and overlapping with the signal for the very first computed point of the convolution. +For this reason, simple bounds are used frequently when performing convolutions on an image. + +In the previous code snippet, we were able to perform both a bounded and unbounded convolution. +Here it is again for clarity: + +{% method %} +{% sample lang="jl" %} +[import:27-46, lang:"julia"](code/julia/1d_convolution.jl) +{% sample lang="cs" %} +[import:63-84, lang:"csharp"](code/csharp/1DConvolution.cs) +{% sample lang="py" %} +[import:20-31, lang:"python"](code/python/1d_convolution.py) +{% endmethod %} + +Here, the main difference between the bounded and unbounded versions is that the output array size is smaller in the bounded case. 
+For an unbounded convolution, the function would be called with the output array size specified to be the size of both signals put together:
+
+{% method %}
+{% sample lang="jl" %}
+[import:58-59, lang:"julia"](code/julia/1d_convolution.jl)
+{% sample lang="cs" %}
+[import:96-97, lang:"csharp"](code/csharp/1DConvolution.cs)
+{% sample lang="py" %}
+[import:41-42, lang:"python"](code/python/1d_convolution.py)
+{% endmethod %}
+
+On the other hand, the bounded call would set the output array size to simply be the length of the signal:
+
+{% method %}
+{% sample lang="jl" %}
+[import:61-62, lang:"julia"](code/julia/1d_convolution.jl)
+{% sample lang="cs" %}
+[import:98-99, lang:"csharp"](code/csharp/1DConvolution.cs)
+{% sample lang="py" %}
+[import:44-45, lang:"python"](code/python/1d_convolution.py)
+{% endmethod %}
+
+Finally, as we mentioned before, it is possible to center bounded convolutions by changing the location where we calculate each point along the filter.
+This can be done by modifying the following line:
+
+{% method %}
+{% sample lang="jl" %}
+[import:35-35, lang:"julia"](code/julia/1d_convolution.jl)
+{% sample lang="cs" %}
+[import:71-71, lang:"csharp"](code/csharp/1DConvolution.cs)
+{% sample lang="py" %}
+[import:25-25, lang:"python"](code/python/1d_convolution.py)
+{% endmethod %}
+
+Here, `j` counts from `i-length(filter)` to `i`.
+To center the convolution, it would need to count from `i-(length(filter)/2)` to `i+(length(filter)/2)` instead.
+
+I think this is a good place to stop discussions on simple boundary conditions.
+Now let us talk a bit more in detail about the case where we want the filter to continuously reappear every loop.
+This case is known as the "periodic boundary condition" and was used for the visualizations at the start of this chapter.
+ +### Periodic boundary conditions + +Though periodic boundary conditions are more complicated that those mentioned in the previous section, they are still *relatively* straightforward to implement. +With these conditions, the filter will wrap itself around to the other end of the signal whenever it hits a boundary. +In this way, the signal is periodic, with an identical copy of itself acting as left and right neighbors. +Those neighbors then have other neighbors, and those then have more neighbors, creating a sea of signals extending to infinity and beyond in both directions. +For us, this means that when the filter leaves one edge of the domain, it simply appears on the other, opposite edge. + +This particular convolution is known as a *cyclic* convolution and is also the most common output of convolutions that work via the [convolutional theorem](../convolutional_theorem/convolutional_theorem.md), which will be discussed in another section. +For clarity: here is the same cyclic visualization we showed above with a random distribution and a Gaussian signal. + +
+ +
+ +In code, this typically amounts to using some form of modulus operation, as shown here: + +{% method %} +{% sample lang="jl" %} +[import:4-25, lang:"julia"](code/julia/1d_convolution.jl) +{% sample lang="cs" %} +[import:38-61, lang:"csharp"](code/csharp/1DConvolution.cs) +{% sample lang="py" %} +[import:5-17, lang:"python"](code/python/1d_convolution.py) +{% endmethod %} + +This is essentially the same as before, except for the modulus operations, which allow us to work on a periodic domain. + +As a final note before continuing: dealing with boundaries is tricky business and can dramatically change the behavior of the output convolution. +For this reason, it is important to think about what types of boundaries will work best for what you, the programmer, actually need. +The selection of boundary conditions will be a common trope for a large portion of computer graphics and physics algorithms where researchers often need to present and simulate data on an array of some sort. + +## Example Code + +For the code associated with this chapter, we have used the convolution to generate a few files for the full convolution, along with the periodic and simple boundary conditions discussed in this chapter. + +{% method %} +{% sample lang="jl" %} +[import, lang:"julia"](code/julia/1d_convolution.jl) +{% sample lang="cs" %} +[import, lang:"csharp"](code/csharp/1DConvolution.cs) +{% sample lang="py" %} +[import, lang:"python"](code/python/1d_convolution.py) +{% endmethod %} + +At a test case, we have chosen to use two sawtooth functions, which should produce the following images: + +| Description | Image | +| ----------- | ----- | +| Simple Boundaries | | +| Full | | +| Cyclic | | + +As a sanity check, make sure that the bounded convolution is a subset of the full convolution. 
+In this example, the bounded convolution is the start of the full convolution, but it is entirely possible it could be the middle or somewhere else entirely depending on how you counted within the inner, summation loop for the convolution. + + + +## License + +##### Code Examples + +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). + +##### Images/Graphics +- The image "[Square Wave](../res/square_wave.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Triangle Wave](../res/triangle_wave.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Triangle Square Convolution](../res/triangle_square_conv.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Gaussian Square Convolution](../res/1d_gaussian.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Gaussian Random Convolution](../res/1d_rand_gaussian_cyclic.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). 
+- The video "[Double Convolution](../res/double_gaussian.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Sawtooth Wave](../res/sawtooth.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Sawtooth Square Convolution](../res/1d_sawtooth.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Full Random Convolution](../res/1d_rand_gaussian_full.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Simple Random Convolution](../res/1d_rand_gaussian_simple.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Simple Linear](../res/simple_linear.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Full Linear](../res/full_linear.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). 
+- The image "[Cyclic](../res/cyclic.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + + +##### Text + +The text of this chapter was written by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +[

](https://creativecommons.org/licenses/by-sa/4.0/) + +##### Pull Requests + +After initial licensing ([#560](https://github.com/algorithm-archivists/algorithm-archive/pull/560)), the following pull requests have modified the text or graphics of this chapter: +- none + diff --git a/contents/convolutions/1d/code/csharp/1DConvolution.cs b/contents/convolutions/1d/code/csharp/1DConvolution.cs new file mode 100755 index 000000000..c4c1016c6 --- /dev/null +++ b/contents/convolutions/1d/code/csharp/1DConvolution.cs @@ -0,0 +1,110 @@ +using System; +using System.IO; + +namespace Convolution1D +{ + public class Convolution1D + { + // Creates a sawtooth function with the given length. + static double[] CreateSawtooth(int length) + { + var array = new double[length]; + for (var i = 0; i < length; i++) + array[i] = (i + 1) / 200f; + return array; + } + + // Normalizes the given array. + static void Normalize(double[] array) + { + var norm = Norm(array); + for (var i = 0; i < array.Length; i++) + array[i] /= norm; + } + + // Calculates the norm of the array. + static double Norm(double[] array) + { + var sum = 0.0; + for (var i = 0; i < array.Length; i++) + sum += Math.Pow(array[i], 2); + return Math.Sqrt(sum); + } + + // Modulus function which handles negative values properly. + // Assumes that y >= 0. + static int Mod(int x, int y) => ((x % y) + y) % y; + + static double[] ConvolveCyclic(double[] signal, double[] filter) + { + var outputSize = Math.Max(signal.Length, filter.Length); + + // Convolutional output. + var output = new double[outputSize]; + var sum = 0.0; + + for (var i = 0; i < outputSize; i++) + { + for (var j = 0; j < outputSize; j++) + { + if (Mod(i - j, outputSize) < filter.Length) + { + sum += signal[Mod(j - 1, outputSize)] * filter[Mod(i - j, outputSize)]; + } + } + + output[i] = sum; + sum = 0.0; + } + + return output; + } + + static double[] ConvolveLinear(double[] signal, double[] filter, int outputSize) + { + // Convolutional output. 
+ var output = new double[outputSize]; + var sum = 0.0; + + for (var i = 0; i < outputSize; i++) + { + for (var j = Math.Max(0, i - filter.Length); j <= i; j++) + { + if (j < signal.Length && (i - j) < filter.Length) + { + sum += signal[j] * filter[i - j]; + } + } + + output[i] = sum; + sum = 0.0; + } + + return output; + } + + static void Main() + { + // Create sawtooth functions for x and y. + var x = CreateSawtooth(200); + var y = CreateSawtooth(200); + + // Normalization is not strictly necessary, but good practice. + Normalize(x); + Normalize(y); + + // Full convolution, output will be the size of x + y - 1. + var fullLinearOutput = ConvolveLinear(x, y, x.Length + y.Length - 1); + // Simple boundaries. + var simpleLinearOutput = ConvolveLinear(x, y, x.Length); + // Cyclic convolution. + var cyclicOutput = ConvolveCyclic(x, y); + + // Output convolutions to different files for plotting. + File.WriteAllText("full_linear.dat", String.Join(Environment.NewLine, fullLinearOutput)); + File.WriteAllText("simple_linear.dat", String.Join(Environment.NewLine, simpleLinearOutput)); + File.WriteAllText("cyclic.dat", String.Join(Environment.NewLine, cyclicOutput)); + } + } +} + diff --git a/contents/convolutions/1d/code/julia/1d_convolution.jl b/contents/convolutions/1d/code/julia/1d_convolution.jl new file mode 100644 index 000000000..0019c2496 --- /dev/null +++ b/contents/convolutions/1d/code/julia/1d_convolution.jl @@ -0,0 +1,72 @@ +using DelimitedFiles +using LinearAlgebra + +function convolve_cyclic(signal::Array{T, 1}, + filter::Array{T, 1}) where {T <: Number} + + # output size will be the size of sign + output_size = max(length(signal), length(filter)) + + # convolutional output + out = Array{Float64,1}(undef,output_size) + sum = 0 + + for i = 1:output_size + for j = 1:output_size + sum += get(signal, mod1(j, output_size), 0) * get(filter, mod1(i-j, output_size), 0) + end + + out[i] = sum + sum = 0 + + end + + return out +end + +function 
convolve_linear(signal::Array{T, 1}, filter::Array{T, 1}, + output_size) where {T <: Number} + + # convolutional output + out = Array{Float64,1}(undef, output_size) + sum = 0 + + for i = 1:output_size + for j = max(1, i-length(filter)):i + if j <= length(signal) && i-j+1 <= length(filter) + sum += signal[j] * filter[i-j+1] + end + end + + out[i] = sum + sum = 0 + end + + return out +end + +function main() + + # sawtooth functions for x and y + x = [float(i)/200 for i = 1:200] + y = [float(i)/200 for i = 1:200] + + # Normalization is not strictly necessary, but good practice + normalize!(x) + normalize!(y) + + # full convolution, output will be the size of x + y - 1 + full_linear_output = convolve_linear(x, y, length(x) + length(y) - 1) + + # simple boundaries + simple_linear_output = convolve_linear(x, y, length(x)) + + # cyclic convolution + cyclic_output = convolve_cyclic(x, y) + + # outputting convolutions to different files for plotting in external code + writedlm("full_linear.dat", full_linear_output) + writedlm("simple_linear.dat", simple_linear_output) + writedlm("cyclic.dat", cyclic_output) + +end diff --git a/contents/convolutions/1d/code/python/1d_convolution.py b/contents/convolutions/1d/code/python/1d_convolution.py new file mode 100644 index 000000000..e77e68d09 --- /dev/null +++ b/contents/convolutions/1d/code/python/1d_convolution.py @@ -0,0 +1,53 @@ +import numpy as np + +def mod1(x, y): return ((x % y) + y) % y + +def convolve_cyclic(signal, filter_array): + output_size = max(len(signal), len(filter_array)) + out = np.zeros(output_size) + s = 0 + + for i in range(output_size): + for j in range(output_size): + if(mod1(i - j, output_size) < len(filter_array)): + s += signal[mod1(j - 1, output_size)] * filter_array[mod1(i - j, output_size)] + out[i] = s + s = 0 + + return out + + +def convolve_linear(signal, filter_array, output_size): + out = np.zeros(output_size) + s = 0 + + for i in range(output_size): + for j in range(max(0, i - 
len(filter_array)), i + 1): + if j < len(signal) and (i - j) < len(filter_array): + s += signal[j] * filter_array[i - j] + out[i] = s + s = 0 + + return out + +# sawtooth functions for x and y +x = [float(i + 1)/200 for i in range(200)] +y = [float(i + 1)/200 for i in range(200)] + +# Normalization is not strictly necessary, but good practice +x /= np.linalg.norm(x) +y /= np.linalg.norm(y) + +# full convolution, output will be the size of x + y - 1 +full_linear_output = convolve_linear(x, y, len(x) + len(y) - 1) + +# simple boundaries +simple_linear_output = convolve_linear(x, y, len(x)) + +# cyclic convolution +cyclic_output = convolve_cyclic(x, y) + +# outputting convolutions to different files for plotting in external code +np.savetxt('full_linear.dat', full_linear_output) +np.savetxt('simple_linear.dat', simple_linear_output) +np.savetxt('cyclic.dat', cyclic_output) diff --git a/contents/convolutions/2d/2d.md b/contents/convolutions/2d/2d.md new file mode 100644 index 000000000..e2cace847 --- /dev/null +++ b/contents/convolutions/2d/2d.md @@ -0,0 +1,191 @@ +# Convolutions on Images + +For this section, we will no longer be focusing on signals, but instead images (arrays filled with elements of red, green, and blue values). +That said, for the code examples, greyscale images may be used such that each array element is composed of some floating-point value instead of color. +In addition, we will not be discussing boundary conditions too much in this chapter and will instead be using the simple boundaries introduced in the section on [one-dimensional convolutions](../1d/1d.md). + +The extension of one-dimensional convolutions to two dimensions requires a little thought about indexing and the like, but is ultimately the same operation. +Here is an animation of a convolution for a two-dimensional image: + +
+ +
+ +In this case, we convolved the image with a 3x3 square filter, all filled with values of $$\frac{1}{9}$$. +This created a simple blurring effect, which is somewhat expected from the discussion in the previous section. +In code, a two-dimensional convolution might look like this: + +{% method %} +{% sample lang="jl" %} +[import:4-28, lang:"julia"](code/julia/2d_convolution.jl) +{% sample lang="py" %} +[import:5-19, lang:"python"](code/python/2d_convolution.py) +{% endmethod %} + +This is very similar to what we have shown in previous sections; however, it essentially requires four iterable dimensions because we need to iterate through each axis of the output domain *and* the filter. + +At this stage, it is worth highlighting common filters used for convolutions of images. +In particular, we will further discuss the Gaussian filter introduced in the [previous section](../1d/1d.md), and then introduce another set of kernels known as Sobel operators, which are used for naïve edge detection or image derivatives. + +## The Gaussian kernel + +The Gaussian kernel serves as an effective *blurring* operation for images. +As a reminder, the formula for any Gaussian distribution is + +$$ +g(x,y) = \frac{1}{2\pi\sigma^2}e^{-\frac{x^2+y^2}{2\sigma^2}}, +$$ + +where $$\sigma$$ is the standard deviation and is a measure of the width of the Gaussian. +A larger $$\sigma$$ means a larger Gaussian; however, remember that the Gaussian must fit onto the filter, otherwise it will be cut off! +For example, if you are using a $$3\times 3$$ filter, you should not be using $$\sigma = 10$$. +Some definitions of $$\sigma$$ allow users to have a separate deviation in $$x$$ and $$y$$ to create an ellipsoid Gaussian, but for the purposes of this chapter, we will assume $$\sigma_x = \sigma_y$$. +As a general rule of thumb, the larger the filter and standard deviation, the more "smeared" the final convolution will be. 
+ +At this stage, it is important to write some code, so we will generate a simple function that returns a Gaussian kernel with a specified standard deviation and filter size. + +{% method %} +{% sample lang="jl" %} +[import:30-47, lang:"julia"](code/julia/2d_convolution.jl) +{% sample lang="py" %} +[import:21-33, lang:"python"](code/python/2d_convolution.py) +{% endmethod %} + +Though it is entirely possible to create a Gaussian kernel whose standard deviation is independent on the kernel size, we have decided to enforce a relation between the two in this chapter. +As always, we encourage you to play with the code and create your own Gaussian kernels any way you want! +As a note, all the kernels will be scaled (normalized) at the end by the sum of all internal elements. +This ensures that the output of the convolution will not have an obnoxious scale factor associated with it. + +Below are a few images generated by applying a kernel generated with the code above to a black and white image of a circle. + +

+ +

+ + +In (a), we show the original image, which is just a white circle at the center of a $$50\times 50$$ grid. +In (b), we show the image after convolution with a $$3\times 3$$ kernel. +In (c), we show the image after convolution with a $$20\times 20$$ kernel. +Here, we see that (c) is significantly fuzzier than (b), which is a direct consequence of the kernel size. + +There is a lot more that we could talk about, but now is a good time to move on to a slightly more complicated convolutional method: the Sobel operator. + +## The Sobel operator + +The Sobel operator effectively performs a gradient operation on an image by highlighting areas where a large change has been made. +In essence, this means that this operation can be thought of as a naïve edge detector. +Essentially, the $$n$$-dimensional Sobel operator is composed of $$n$$ separate gradient convolutions (one for each dimension) that are then combined together into a final output array. +Again, for the purposes of this chapter, we will stick to two dimensions, which will be composed of two separate gradients along the $$x$$ and $$y$$ directions. +Each gradient will be created by convolving our image with their corresponding Sobel operator: + +$$ +\begin{align} +S_x &= \left(\begin{bmatrix} +1 \\ +2 \\ +1 \\ +\end{bmatrix} \otimes [1~0~-1] +\right) = \begin{bmatrix} +1 & 0 & -1 \\ +2 & 0 & -2 \\ +1 & 0 & -1 \\ +\end{bmatrix}\\ + +S_y &= \left( +\begin{bmatrix} +1 \\ +0 \\ +-1 \\ +\end{bmatrix} \otimes [1~2~1] +\right) = \begin{bmatrix} +1 & 2 & 1 \\ +0 & 0 & 0 \\ +-1 & -2 & -1 \\ +\end{bmatrix}. +\end{align} +$$ + +The gradients can then be found with a convolution, such that: + +$$ +\begin{align} +G_x &= S_x*A \\ +G_y &= S_y*A. +\end{align} +$$ + +Here, $$A$$ is the input array or image. +Finally, these gradients can be summed in quadrature to find the total Sobel operator or image gradient: + +$$ +G_{\text{total}} = \sqrt{G_x^2 + G_y^2} +$$ + +So let us now show what it does in practice: + +

+ +

+ +In this diagram, we start with the circle image on the right, and then convolve it with the $$S_x$$ and $$S_y$$ operators to find the gradients along $$x$$ and $$y$$ before summing them in quadrature to get the final image gradient. +Here, we see that the edges of our input image have been highlighted, showing outline of our circle. +This is why the Sobel operator is also known as naïve edge detection and is an integral component to many more sophisticated edge detection methods like one proposed by Canny {{ "canny1986computational" | cite }}. + +In code, the Sobel operator involves first finding the operators in $$x$$ and $$y$$ and then applying them with a traditional convolution: + +{% method %} +{% sample lang="jl" %} +[import:49-63, lang:"julia"](code/julia/2d_convolution.jl) +{% sample lang="py" %} +[import:36-52, lang:"python"](code/python/2d_convolution.py) +{% endmethod %} + +With that, I believe we are at a good place to stop discussions on two-dimensional convolutions. +We will definitely return to this topic in the future as new algorithms require more information. + +## Example Code + +For the code in this section, we have modified the visualizations from the [one-dimensional convolution chapter](../1d/1d.md) to add a two-dimensional variant for blurring an image of random white noise. +We have also added code to create the Gaussian kernel and Sobel operator and apply it to the circle, as shown in the text. + +{% method %} +{% sample lang="jl" %} +[import, lang:"julia"](code/julia/2d_convolution.jl) +{% sample lang="py" %} +[import, lang:"python"](code/python/2d_convolution.py) +{% endmethod %} + + + +### Bibliography + +{% references %} {% endreferences %} + +## License + +##### Code Examples + +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). 
+ +##### Images/Graphics +- The image "[8bit Heart](../res/heart_8bit.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Circle Blur](../res/circle_blur.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Sobel Filters](../res/sobel_filters.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[2D Convolution](../res/2d.mp4)" was created by [James Schloss](https://github.com/leios) and [Grant Sanderson](https://github.com/3b1b) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +##### Text + +The text of this chapter was written by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +[

](https://creativecommons.org/licenses/by-sa/4.0/) + +##### Pull Requests + +After initial licensing ([#560](https://github.com/algorithm-archivists/algorithm-archive/pull/560)), the following pull requests have modified the text or graphics of this chapter: +- none + diff --git a/contents/convolutions/2d/code/julia/2d_convolution.jl b/contents/convolutions/2d/code/julia/2d_convolution.jl new file mode 100644 index 000000000..6d38e5048 --- /dev/null +++ b/contents/convolutions/2d/code/julia/2d_convolution.jl @@ -0,0 +1,125 @@ +using DelimitedFiles +using LinearAlgebra + +function convolve_linear(signal::Array{T, 2}, filter::Array{T, 2}, + output_size) where {T <: Number} + + # convolutional output + out = Array{Float64,2}(undef, output_size) + sum = 0 + + for i = 1:output_size[1] + for j = 1:output_size[2] + for k = max(1, i-size(filter)[1]):i + for l = max(1, j-size(filter)[2]):j + if k <= size(signal)[1] && i-k+1 <= size(filter)[1] && + l <= size(signal)[2] && j-l+1 <= size(filter)[2] + sum += signal[k,l] * filter[i-k+1, j-l+1] + end + end + end + + out[i,j] = sum + sum = 0 + end + end + + return out +end + +function create_gaussian_kernel(kernel_size) + + kernel = zeros(kernel_size, kernel_size) + + # The center must be offset by 0.5 to find the correct index + center = kernel_size * 0.5 + 0.5 + + sigma = sqrt(0.1*kernel_size) + + for i = 1:kernel_size + for j = 1:kernel_size + kernel[i,j] = exp(-((i-center)^2 + (j-center)^2) / (2*sigma^2)) + end + end + + return normalize(kernel) + +end + +function create_sobel_operators() + Sx = [1.0, 2.0, 1.0]*[-1.0 0.0 1.0] / 9 + Sy = [-1.0, 0.0, 1.0]*[1.0 2.0 1.0] / 9 + + return Sx, Sy +end + +function compute_sobel(signal) + Sx, Sy = create_sobel_operators() + + Gx = convolve_linear(signal, Sx, size(signal) .+ size(Sx)) + Gy = convolve_linear(signal, Sy, size(signal) .+ size(Sy)) + + return sqrt.(Gx.^2 .+ Gy.^2) +end + +# Simple function to create a square grid with a circle embedded inside of it +function 
create_circle(image_resolution, grid_extents, radius) + out = zeros(image_resolution, image_resolution) + + for i = 1:image_resolution + x_position = ((i-1)*grid_extents/image_resolution)-0.5*grid_extents + for j = 1:image_resolution + y_position = ((j-1)*grid_extents/image_resolution)-0.5*grid_extents + if x_position^2 + y_position^2 <= radius^2 + out[i,j] = 1.0 + end + end + end + + return out +end + +function main() + + # Random distribution in x + x = rand(100, 100) + + # Gaussian signals + y = [exp(-(((i-50)/100)^2 + ((j-50)/100)^2)/.01) for i = 1:100, j=1:100] + + # Normalization is not strictly necessary, but good practice + normalize!(x) + normalize!(y) + + # full convolution, output will be the size of x + y + full_linear_output = convolve_linear(x, y, size(x) .+ size(y)) + + # simple boundaries + simple_linear_output = convolve_linear(x, y, size(x)) + + # outputting convolutions to different files for plotting in external code + writedlm("full_linear.dat", full_linear_output) + writedlm("simple_linear.dat", simple_linear_output) + + # creating simple circle and 2 different Gaussian kernels + circle = create_circle(50,2,0.5) + + normalize!(circle) + + small_kernel = create_gaussian_kernel(3) + large_kernel = create_gaussian_kernel(25) + + small_kernel_output = convolve_linear(circle, small_kernel, + size(circle).+size(small_kernel)) + large_kernel_output = convolve_linear(circle, large_kernel, + size(circle).+size(large_kernel)) + + writedlm("small_kernel.dat", small_kernel_output) + writedlm("large_kernel.dat", large_kernel_output) + + # Using the circle for Sobel operations as well + sobel_output = compute_sobel(circle) + + writedlm("sobel_output.dat", sobel_output) + +end diff --git a/contents/convolutions/2d/code/python/2d_convolution.py b/contents/convolutions/2d/code/python/2d_convolution.py new file mode 100644 index 000000000..48b79b611 --- /dev/null +++ b/contents/convolutions/2d/code/python/2d_convolution.py @@ -0,0 +1,122 @@ +import numpy as np 
+from contextlib import suppress + + +def convolve_linear(signal, filter, output_size): + out = np.zeros(output_size) + sum = 0 + + for i in range(output_size[0]): + for j in range(output_size[1]): + for k in range(max(0, i-filter.shape[0]), i+1): + for l in range(max(0, j-filter.shape[1]), j+1): + with suppress(IndexError): + sum += signal[k, l] * filter[i-k, j-l] + out[i, j] = sum + sum = 0 + + return out + + +def create_gaussian_kernel(kernel_size): + kernel = np.zeros((kernel_size, kernel_size)) + + # The center must be offset by 0.5 to find the correct index + center = kernel_size*0.5 + 0.5 + + sigma = np.sqrt(0.1*kernel_size) + + def kernel_function(x, y): + return np.exp(-((x-center+1)**2 + (y-center+1)**2)/(2*sigma**2)) + + kernel = np.fromfunction(kernel_function, (kernel_size, kernel_size)) + return kernel / np.linalg.norm(kernel) + + +def create_sobel_operators(): + Sx = np.dot([[1.0], [2.0], [1.0]], [[-1.0, 0.0, 1.0]]) / 9 + Sy = np.dot([[-1.0], [0.0], [1.0]], [[1.0, 2.0, 1.0]]) / 9 + + return Sx, Sy + +def sum_matrix_dimensions(mat1, mat2): + return (mat1.shape[0] + mat2.shape[0], + mat1.shape[1] + mat2.shape[1]) + +def compute_sobel(signal): + Sx, Sy = create_sobel_operators() + + Gx = convolve_linear(signal, Sx, sum_matrix_dimensions(signal, Sx)) + Gy = convolve_linear(signal, Sy, sum_matrix_dimensions(signal, Sy)) + + return np.sqrt(np.power(Gx, 2) + np.power(Gy, 2)) + + +def create_circle(image_resolution, grid_extents, radius): + out = np.zeros((image_resolution, image_resolution)) + + for i in range(image_resolution): + x_position = ((i * grid_extents / image_resolution) + - 0.5 * grid_extents) + for j in range(image_resolution): + y_position = ((j * grid_extents / image_resolution) + - 0.5 * grid_extents) + if x_position ** 2 + y_position ** 2 <= radius ** 2: + out[i, j] = 1.0 + + return out + + +def main(): + + # Random distribution in x + x = np.random.rand(100, 100) + + # Gaussian signals + def create_gaussian_signals(i, j): + return 
np.exp(-(((i-50)/100) ** 2 + + ((j-50)/100) ** 2) / .01) + y = np.fromfunction(create_gaussian_signals, (100, 100)) + + # Normalization is not strictly necessary, but good practice + x /= np.linalg.norm(x) + y /= np.linalg.norm(y) + + # full convolution, output will be the size of x + y + full_linear_output = convolve_linear(x, y, sum_matrix_dimensions(x, y)) + + # simple boundaries + simple_linear_output = convolve_linear(x, y, x.shape) + + np.savetxt("full_linear.dat", full_linear_output) + np.savetxt("simple_linear.dat", simple_linear_output) + + # creating simple circle and 2 different Gaussian kernels + circle = create_circle(50, 2, 0.5) + + circle = circle / np.linalg.norm(circle) + + small_kernel = create_gaussian_kernel(3) + large_kernel = create_gaussian_kernel(25) + + small_kernel_output = convolve_linear(circle, small_kernel, + sum_matrix_dimensions(circle, + small_kernel)) + + large_kernel_output = convolve_linear(circle, large_kernel, + sum_matrix_dimensions(circle, + large_kernel)) + + np.savetxt("small_kernel.dat", small_kernel_output) + np.savetxt("large_kernel.dat", large_kernel_output) + + circle = create_circle(50, 2, 0.5) + + # Normalization + circle = circle / np.linalg.norm(circle) + + # using the circle for sobel operations as well + sobel_output = compute_sobel(circle) + + np.savetxt("sobel_output.dat", sobel_output) + diff --git a/contents/convolutions/convolutional_theorem/code/julia/convolutional_theorem.jl b/contents/convolutions/convolutional_theorem/code/julia/convolutional_theorem.jl new file mode 100644 index 000000000..9016bf3d8 --- /dev/null +++ b/contents/convolutions/convolutional_theorem/code/julia/convolutional_theorem.jl @@ -0,0 +1,30 @@ +using FFTW +using LinearAlgebra +using DelimitedFiles + +# using the convolutional theorem +function convolve_fft(signal1::Array{T}, signal2::Array{T}) where {T <: Number} + return ifft(fft(signal1).*fft(signal2)) +end + +function main() + + # sawtooth functions for x and y + x = 
[float(i)/200 for i = 1:200] + y = [float(i)/200 for i = 1:200] + + # Normalization is not strictly necessary, but good practice + normalize!(x) + normalize!(y) + + # cyclic convolution via the convolutional theorem + fft_output = convolve_fft(x, y) + + # outputting convolutions to different files for plotting in external code + # note: we are outputting just the real component because the imaginary + # component is virtually 0 + writedlm("fft.dat", real(fft_output)) + +end + +main() diff --git a/contents/convolutions/convolutional_theorem/code/python/convolutional_theorem.py b/contents/convolutions/convolutional_theorem/code/python/convolutional_theorem.py new file mode 100644 index 000000000..f64f44a1d --- /dev/null +++ b/contents/convolutions/convolutional_theorem/code/python/convolutional_theorem.py @@ -0,0 +1,19 @@ +from scipy.fft import fft, ifft +import numpy as np + +# using the convolutional theorem +def convolve_fft(signal1, signal2): + return ifft(np.multiply(fft(signal1),fft(signal2))) + +# Sawtooth functions +x = [float(i)/200 for i in range(1,101)] +y = [float(i)/200 for i in range(1,101)] + +x /= np.linalg.norm(x) +y /= np.linalg.norm(y) + +# Convolving the two signals +fft_output = convolve_fft(x, y) + +np.savetxt("fft.dat", np.real(fft_output)) + diff --git a/contents/convolutions/convolutional_theorem/convolutional_theorem.md b/contents/convolutions/convolutional_theorem/convolutional_theorem.md new file mode 100644 index 000000000..b47bec179 --- /dev/null +++ b/contents/convolutions/convolutional_theorem/convolutional_theorem.md @@ -0,0 +1,82 @@ +# Convolutional Theorem + +Important note: this particular section will be expanded upon after the Fourier transform and Fast Fourier Transform (FFT) chapters have been revised. + +Now, let me tell you about a bit of computational magic: + +**Convolutions can be performed with Fourier Transforms!** + +This is crazy, but it is also incredibly hard to explain, so let me do my best. 
+As described in the chapter on [Fourier Transforms](../cooley_tukey/cooley_tukey.md), Fourier Transforms allow programmers to move from real space to frequency space. +When we transform a wave into frequency space, we can see a single peak in frequency space related to the frequency of that wave. +No matter what function we send into a Fourier Transform, the frequency-space image can be interpreted as a series of different waves with a specified frequency. +Each of these waves is parameterized by another $$e^{2\pi i k n / N}$$ term, where $$k$$ is the element's value in the frequency domain, $$n$$ is its value in the time domain, and $$N$$ is the overall length of the signal. +In this way, each wave can be seen as a complex exponential. + +So here's the idea: if we take two functions $$f(x)$$ and $$g(x)$$ and move them to frequency space to be $$\hat f(\xi)$$ and $$\hat g(\xi)$$, we can then multiply those two functions and transform them back into to blend the signals together. +In this way, we will have a third function that relates the frequency-space images of the two input functions. +This is known as the *convolution theorem* which looks something like this: + +$$\mathcal{F}(f*g) = \mathcal{F}(f) \cdot \mathcal{F}(g)$$ + +Where $$\mathcal{F}$$ denotes the Fourier Transform. + +At first, this might not seem particularly intuitive, but remember that frequency space is essentially composed of a set of exponentials. +As mentioned in the section about [Multiplication as a Convolution](../multiplication/multiplication.md), multiplication in base 10 space is also a convolution. +The convolutional theorem extends this concept into multiplication with *any* set of exponentials, not just base 10. +Obviously, this description is still lacking a bit of explanation, but I promise we will add more when revising the Fourier transform sections! 
+ +By using a Fast Fourier Transform (FFT) in code, this can take a standard convolution on two arrays of length $$n$$, which is an $$\mathcal{O}(n^2)$$ process, to $$\mathcal{O}(n\log(n))$$. +This means that the convolution theorem is fundamental to creating fast convolutional methods for certain large inputs. + +{% method %} +{% sample lang="jl" %} +[import:5-8, lang:"julia"](code/julia/convolutional_theorem.jl) +{% endmethod %} + +This method also has the added advantage that it will *always output an array of the size of your signal*; however, if your signals are not of equal size, we need to pad the smaller signal with zeros. +Also note that the Fourier Transform is a periodic or cyclic operation, so there are no real edges in this method, instead the arrays "wrap around" to the other side, creating a cyclic convolution like we showed in the periodic boundary condition case for the [one-dimensional convolution](../1d/1d.md). + +## Example Code + +For this example code, we will be using two sawtooth functions as we did in the chapter on [one-dimensional convolutions](../1d/1d.md): + +{% method %} +{% sample lang="jl" %} +[import, lang:"julia"](code/julia/convolutional_theorem.jl) +{% sample lang="py" %} +[import, lang:"python"](code/python/convolutional_theorem.py) +{% endmethod %} + +This should produce the following output: + +

+ +

+ + + +## License + +##### Code Examples + +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). + +##### Images/Graphics + +- The image "[Cyclic](../res/cyclic.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + + +##### Text + +The text of this chapter was written by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +[

](https://creativecommons.org/licenses/by-sa/4.0/) + +##### Pull Requests + +After initial licensing ([#560](https://github.com/algorithm-archivists/algorithm-archive/pull/560)), the following pull requests have modified the text or graphics of this chapter: +- none + diff --git a/contents/convolutions/convolutions.md b/contents/convolutions/convolutions.md new file mode 100644 index 000000000..afbd078f6 --- /dev/null +++ b/contents/convolutions/convolutions.md @@ -0,0 +1,46 @@ +# Convolutions +To put it bluntly, convolutions can be confusing. +Some might even call them *convoluted*! +(Get it? Because we are talking about *convolutions*? A wise man once told me that all good jokes need additional clarification.) + +Not only are convolutions hard to describe, but if they are not used in practice, it is hard to understand why they would ever be needed. +I am going to do what I can to describe them in an intuitive way; however, I may need to come back to this in the future. +Let me know if there is anything here that is unclear, and I will do what I can to clear it up. + +As always, we should start at the start. +If you take two functions $$f$$ and $$g$$, there are a number of ways you can combine them. +All basic operations can do this (addition, subtraction, multiplication, and division), but there are also special operations that only work with functions and do not work on standard variables or numbers. +For example, $$f \circ g$$ is a *composition* of the two functions, where you plug $$g(x)$$ into $$f$$. +A convolution is another function-related operation, and is often notated with a star $$(*)$$ operator, where + +$$ +f*g=c +$$ + +provides a third function, $$c$$, that is a blended version of $$f$$ and $$g$$. +As a rather important side-note: there is an incredibly similar operation known as a *correlation* which will be discussed in the near future. +Now we are left with a rather vague question: how do we *blend* functions? 
+ +To answer this question, we will need to show off a few simple graphics and animations in the [Convolutions in 1D](1d/1d.md) section while also discussing the mathematical definition of convolutions. +After, there will be a brief discussion on an interesting application of one dimensional convolutions in integer multiplication in the [Multiplication as a Convolution](multiplication/multiplication.md) section. +We will then move on to the most stereotypical application of convolutions in the [Convolutions of Images](2d/2d.md) section, where we will also discuss two important filters: the Gaussian kernel and the Sobel operator. +As a note: convolutions can be extended to $$n$$-dimensions, but after seeing how they are extended to two dimensions, it should be possible for the reader to extend it to three dimensions and beyond if that is needed, so we will not cover that in great detail here unless is is useful for another algorithm. +In addition, we will be touching on a rather difficult but powerful topic with the [Convolutional Theorem](convolutional_theorem/convolutional_theorem.md) section where convolutions can be computed by using [Fourier transforms](../Cooley_tukey/cooley_tukey.md). + + + +## License + +##### Text + +The text of this chapter was written by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +[

](https://creativecommons.org/licenses/by-sa/4.0/) + +##### Pull Requests + +After initial licensing ([#560](https://github.com/algorithm-archivists/algorithm-archive/pull/560)), the following pull requests have modified the text or graphics of this chapter: +- none + diff --git a/contents/convolutions/multiplication/multiplication.md b/contents/convolutions/multiplication/multiplication.md new file mode 100644 index 000000000..0e5e30fa6 --- /dev/null +++ b/contents/convolutions/multiplication/multiplication.md @@ -0,0 +1,112 @@ +# Multiplication as a convolution + +As a brief aside, we will touch on a rather interesting side topic: the relation between integer multiplication and convolutions +As an example, let us consider the following multiplication: $$123 \times 456 = 56088$$. + +In this case, we might line up the numbers, like so: + +$$ +\begin{matrix} +&&1&2&3 \\ +&\times &4&5&6 \\ +\hline +5 & 6 & 0 & 8 & 8 +\end{matrix} +$$ + +Here, each column represents another power of 10, such that in the number 123, there is 1 100, 2 10s, and 3 1s. 
+So let us use a similar notation to perform the convolution, by reversing the second set of numbers and moving it to the right, performing an element-wise multiplication at each step: + +$$ +\begin{matrix} +&&&\color{red}1&2&3 \\ +\times &6&5&\color{red}4&& \\ +\hline +\end{matrix}\\ +\color{red}{1}\times\color{red}{4} = 4 +$$ + +$$ +\begin{matrix} +&&&\color{red}1&\color{green}2&3 \\ +\times &&6&\color{red}5&\color{green}4& \\ +\hline +\end{matrix}\\ +\color{red}1\times\color{red}5+\color{green}2\times\color{green}4=13 +$$ + +$$ +\begin{matrix} +&&&\color{red}1&\color{green}2&\color{blue}3 \\ +\times &&&\color{red}6&\color{green}5&\color{blue}4 \\ +\hline +\end{matrix}\\ +\color{red}1\times\color{red}6+\color{green}2\times\color{green}5+\color{blue}3\times\color{blue}4=28 +$$ + +$$ +\begin{matrix} +&&1&\color{green}2&\color{blue}3& \\ +\times &&&\color{green}6&\color{blue}5&4 \\ +\hline +\end{matrix}\\ +\color{green}2\times\color{green}6+\color{blue}3\times\color{blue}5=27 +$$ + +$$ +\begin{matrix} +&1&2&\color{blue}3&& \\ +\times &&&\color{blue}6&5&4 \\ +\hline +\end{matrix}\\ +\color{blue}3\times\color{blue}6=18 +$$ + +For these operations, any blank space should be considered a $$0$$. +In the end, we will have a new set of numbers: + +$$ +\begin{matrix} +&&1&2&3 \\ +&\times &4&5&6 \\ +\hline +4 & 13 & 28 & 27 & 18 +\end{matrix} +$$ + +Now all that is left is to perform the *carrying* operation by moving any number in the 10s digit to its left-bound neighbor. +For example, the numbers $$[4, 18]=[4+1, 8]=[5,8]$$ or 58. +For these numbers, + +$$ +\begin{matrix} +&4 & 13 & 28 & 27 & 18\\ +=&4+1 & 3+2 & 8+2 & 7+1 & 8\\ +=&5 & 5 & 10 & 8 & 8\\ +=&5 & 5+1 & 0 & 8 & 8\\ +=&5 & 6 & 0 & 8 & 8 +\end{matrix} +$$ + +Which give us $$123\times456=56088$$, the correct answer for integer multiplication. 
+I am not suggesting that we teach elementary school students to learn convolutions, but I do feel this is an interesting fact that most people do not know: integer multiplication can be performed with a convolution. + +This will be discussed in further detail when we talk about the Schonhage-Strassen algorithm, which uses this fact to perform multiplications for incredibly large integers. + + + +## License + +##### Text + +The text of this chapter was written by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +[

](https://creativecommons.org/licenses/by-sa/4.0/) + +##### Pull Requests + +After initial licensing ([#560](https://github.com/algorithm-archivists/algorithm-archive/pull/560)), the following pull requests have modified the text or graphics of this chapter: +- none + diff --git a/contents/convolutions/res/1d_gaussian.mp4 b/contents/convolutions/res/1d_gaussian.mp4 new file mode 100644 index 000000000..75252f154 Binary files /dev/null and b/contents/convolutions/res/1d_gaussian.mp4 differ diff --git a/contents/convolutions/res/1d_rand_gaussian_cyclic.mp4 b/contents/convolutions/res/1d_rand_gaussian_cyclic.mp4 new file mode 100644 index 000000000..8ba33ff18 Binary files /dev/null and b/contents/convolutions/res/1d_rand_gaussian_cyclic.mp4 differ diff --git a/contents/convolutions/res/1d_rand_gaussian_full.mp4 b/contents/convolutions/res/1d_rand_gaussian_full.mp4 new file mode 100644 index 000000000..9a8f1169f Binary files /dev/null and b/contents/convolutions/res/1d_rand_gaussian_full.mp4 differ diff --git a/contents/convolutions/res/1d_rand_gaussian_simple.mp4 b/contents/convolutions/res/1d_rand_gaussian_simple.mp4 new file mode 100644 index 000000000..929e4c5e0 Binary files /dev/null and b/contents/convolutions/res/1d_rand_gaussian_simple.mp4 differ diff --git a/contents/convolutions/res/1d_sawtooth.mp4 b/contents/convolutions/res/1d_sawtooth.mp4 new file mode 100644 index 000000000..1da346c25 Binary files /dev/null and b/contents/convolutions/res/1d_sawtooth.mp4 differ diff --git a/contents/convolutions/res/2d.mp4 b/contents/convolutions/res/2d.mp4 new file mode 100644 index 000000000..644f39168 Binary files /dev/null and b/contents/convolutions/res/2d.mp4 differ diff --git a/contents/convolutions/res/circle_blur.png b/contents/convolutions/res/circle_blur.png new file mode 100644 index 000000000..07d42bbc8 Binary files /dev/null and b/contents/convolutions/res/circle_blur.png differ diff --git a/contents/convolutions/res/cyclic.png 
b/contents/convolutions/res/cyclic.png new file mode 100644 index 000000000..53b6b6176 Binary files /dev/null and b/contents/convolutions/res/cyclic.png differ diff --git a/contents/convolutions/res/double_gaussian.mp4 b/contents/convolutions/res/double_gaussian.mp4 new file mode 100644 index 000000000..69483e71f Binary files /dev/null and b/contents/convolutions/res/double_gaussian.mp4 differ diff --git a/contents/convolutions/res/full_linear.png b/contents/convolutions/res/full_linear.png new file mode 100644 index 000000000..c8f627a6d Binary files /dev/null and b/contents/convolutions/res/full_linear.png differ diff --git a/contents/convolutions/res/heart_8bit.png b/contents/convolutions/res/heart_8bit.png new file mode 100644 index 000000000..d51ee45b2 Binary files /dev/null and b/contents/convolutions/res/heart_8bit.png differ diff --git a/contents/convolutions/res/sawtooth.png b/contents/convolutions/res/sawtooth.png new file mode 100644 index 000000000..2ab169253 Binary files /dev/null and b/contents/convolutions/res/sawtooth.png differ diff --git a/contents/convolutions/res/simple_linear.png b/contents/convolutions/res/simple_linear.png new file mode 100644 index 000000000..9cfade46c Binary files /dev/null and b/contents/convolutions/res/simple_linear.png differ diff --git a/contents/convolutions/res/sobel_filters.png b/contents/convolutions/res/sobel_filters.png new file mode 100644 index 000000000..7dbd105b9 Binary files /dev/null and b/contents/convolutions/res/sobel_filters.png differ diff --git a/contents/convolutions/res/square_wave.png b/contents/convolutions/res/square_wave.png new file mode 100644 index 000000000..9cd5ea4dd Binary files /dev/null and b/contents/convolutions/res/square_wave.png differ diff --git a/contents/convolutions/res/triangle_square_conv.mp4 b/contents/convolutions/res/triangle_square_conv.mp4 new file mode 100644 index 000000000..b36a3d7ef Binary files /dev/null and b/contents/convolutions/res/triangle_square_conv.mp4 differ 
diff --git a/contents/convolutions/res/triangle_wave.png b/contents/convolutions/res/triangle_wave.png new file mode 100644 index 000000000..9f4592c9d Binary files /dev/null and b/contents/convolutions/res/triangle_wave.png differ diff --git a/contents/cooley_tukey/code/asm-x64/SConscript b/contents/cooley_tukey/code/asm-x64/SConscript new file mode 100644 index 000000000..2a10fbc14 --- /dev/null +++ b/contents/cooley_tukey/code/asm-x64/SConscript @@ -0,0 +1,6 @@ +Import('files_to_compile env') + +for file_info in files_to_compile: + build_target = f'#/build/{file_info.language}/{file_info.chapter}/{file_info.path.stem}' + build_result = env.X64(build_target, str(file_info.path), LIBS='m', LINKFLAGS='-no-pie') + env.Alias(str(file_info.chapter), build_result) \ No newline at end of file diff --git a/contents/cooley_tukey/code/c/SConscript b/contents/cooley_tukey/code/c/SConscript new file mode 100644 index 000000000..bb40f4a85 --- /dev/null +++ b/contents/cooley_tukey/code/c/SConscript @@ -0,0 +1,6 @@ +Import('files_to_compile env') + +for file_info in files_to_compile: + build_target = f'#/build/{file_info.language}/{file_info.chapter}/{file_info.path.stem}' + build_result = env.C(build_target, str(file_info.path), LIBS=['m', 'fftw3']) + env.Alias(str(file_info.chapter), build_result) \ No newline at end of file diff --git a/contents/cooley_tukey/code/c/fft.c b/contents/cooley_tukey/code/c/fft.c index 90691f373..212b272b1 100644 --- a/contents/cooley_tukey/code/c/fft.c +++ b/contents/cooley_tukey/code/c/fft.c @@ -6,12 +6,12 @@ #include #include -void fft(double complex *x, int n) { +void fft(double complex *x, size_t n) { double complex y[n]; memset(y, 0, sizeof(y)); fftw_plan p; - p = fftw_plan_dft_1d(n, (fftw_complex*)x, (fftw_complex*)y, + p = fftw_plan_dft_1d((int)n, (fftw_complex*)x, (fftw_complex*)y, FFTW_FORWARD, FFTW_ESTIMATE); fftw_execute(p); @@ -27,7 +27,7 @@ void dft(double complex *X, const size_t N) { for (size_t i = 0; i < N; ++i) { tmp[i] = 0; for 
(size_t j = 0; j < N; ++j) { - tmp[i] += X[j] * cexp(-2.0 * M_PI * I * j * i / N); + tmp[i] += X[j] * cexp(-2.0 * M_PI * I * (double)j * (double)i / (double)N); } } @@ -49,16 +49,16 @@ void cooley_tukey(double complex *X, const size_t N) { cooley_tukey(X + N / 2, N / 2); for (size_t i = 0; i < N / 2; ++i) { - X[i + N / 2] = X[i] - cexp(-2.0 * I * M_PI * i / N) * X[i + N / 2]; + X[i + N / 2] = X[i] - cexp(-2.0 * I * M_PI * (double)i / (double)N) * X[i + N / 2]; X[i] -= (X[i + N / 2]-X[i]); } } } void bit_reverse(double complex *X, size_t N) { - for (int i = 0; i < N; ++i) { - int n = i; - int a = i; + for (size_t i = 0; i < N; ++i) { + size_t n = i; + size_t a = i; int count = (int)log2((double)N) - 1; n >>= 1; @@ -67,7 +67,7 @@ void bit_reverse(double complex *X, size_t N) { count--; n >>= 1; } - n = (a << count) & ((1 << (int)log2((double)N)) - 1); + n = (a << count) & (size_t)((1 << (size_t)log2((double)N)) - 1); if (n > i) { double complex tmp = X[i]; @@ -81,8 +81,8 @@ void iterative_cooley_tukey(double complex *X, size_t N) { bit_reverse(X, N); for (int i = 1; i <= log2((double)N); ++i) { - int stride = pow(2, i); - double complex w = cexp(-2.0 * I * M_PI / stride); + size_t stride = (size_t)pow(2, i); + double complex w = cexp(-2.0 * I * M_PI / (double)stride); for (size_t j = 0; j < N; j += stride) { double complex v = 1.0; for (size_t k = 0; k < stride / 2; ++k) { @@ -105,7 +105,7 @@ void approx(double complex *X, double complex *Y, size_t N) { } int main() { - srand(time(NULL)); + srand((unsigned int)time(NULL)); double complex x[64], y[64], z[64]; for (size_t i = 0; i < 64; ++i) { x[i] = rand() / (double) RAND_MAX; diff --git a/contents/cooley_tukey/code/clisp/fft.lisp b/contents/cooley_tukey/code/clisp/fft.lisp new file mode 100644 index 000000000..8fa7273a1 --- /dev/null +++ b/contents/cooley_tukey/code/clisp/fft.lisp @@ -0,0 +1,113 @@ + +(defun coefficient (time-index freq-index dft-len) + "Calculates a single twiddle factor for the Fourier Transform." 
+ (exp (- (/ (* #c(0 1) 2.0 pi time-index freq-index) + dft-len)))) + +(defun dft (data) + "Performs the Discrete Fourier Transform" + (let ((dft-len (length data))) + (loop for freq-index from 0 below dft-len collect + (loop for time-index from 0 below dft-len sum + (* (coefficient time-index freq-index dft-len) (elt data time-index)))))) + +(defun merge-sub-ffts (evens odds) + "Combines the FFTs of the even and odd indices." + (let* ((fft-length (+ (length evens) (length odds))) + ;; Calculate coefficients for the odd indices. + (twiddle-factors (loop for i from 0 below (length odds) + collect (coefficient 1.0 i fft-length))) + ;; Multiply values with coefficients. + (odd-terms (mapcar #'* odds twiddle-factors))) + ;; Combine the two FFTs. + (concatenate 'list + (mapcar #'+ evens odd-terms) + (mapcar #'- evens odd-terms)))) + +(defun cooley-tukey-rec (data) + "Performs the Fourier Transform using the recursive Cooley-Tukey method." + (if (<= (length data) 1) + data + (loop + for i from 0 below (length data) + ;; Split even and odd indexed elements into two seperate lists. + if (evenp i) + collect (elt data i) into evens + else + collect (elt data i) into odds + finally + ;; Calculate the Fourier Transform for the two smaller lists and + ;; combine them into the Fourier Transform of the full input. + (return (merge-sub-ffts (cooley-tukey-rec evens) + (cooley-tukey-rec odds)))))) + +(defun reverse-bits (value num-bits) + "Reverses the bits of a value" + (if (= num-bits 1) + value + ;; Split bits into two parts. + (let* ((num-low-bits (floor (/ num-bits 2))) + (num-high-bits (- num-bits num-low-bits)) + (bit-mask (- (expt 2 num-low-bits) 1)) + (lower-half (logand value bit-mask)) + (upper-half (ash value (- num-low-bits)))) + ;; Reverse the bits of each part, then swap the results. 
+ (logior (ash (reverse-bits lower-half num-low-bits) num-high-bits) + (reverse-bits upper-half num-high-bits))))) + +(defun bit-shuffle-indices (data) + "Rearanges the elements in a list according to their bit-reversed indices." + (loop + with num-bits = (floor (log (length data) 2)) + for i from 0 below (length data) + collect (elt data (reverse-bits i num-bits)))) + +(defun butterfly (a b coeff) + "Calculates a single butterfly." + (values (+ a (* coeff b)) (- a (* coeff b)))) + +(defun butterfly-group (data start stride) + "Calculates a single group of butterflies." + (dotimes (i stride) + ;; Take two elements which are stride apart and perform a butterfly on them. + (let* ((first-elt-index (+ start i)) + (second-elt-index (+ start i stride)) + (first-elt (elt data first-elt-index)) + (second-elt (elt data second-elt-index)) + (coeff (coefficient 1.0 i (* 2 stride)))) + (multiple-value-bind (sum difference) (butterfly first-elt second-elt coeff) + ;; Write results back into the list. + (setf (elt data first-elt-index) sum) + (setf (elt data second-elt-index) difference))))) + +(defun cooley-tukey-iter (data) + "Performs the Fourier Transform using the iterative Cooley-Tukey method." + (loop + ;; Bit-shuffle indices. + with shuffled-data = (bit-shuffle-indices data) + for stride = 1 then (* 2 stride) + while (< stride (length shuffled-data)) + do + ;; Compute butterfly groups for the current stride. + (loop for i from 0 below (length shuffled-data) by (* 2 stride) do + (butterfly-group shuffled-data i stride)) + finally (return shuffled-data))) + +(defun approx-eql (list1 list2) + (let ((diffs (mapcar #'(lambda (e1 e2) (abs (- e1 e2))) + list1 + list2))) + (loop for d in diffs always (< d 1e-9)))) + +(defun test-fft (data) + (let ((dft-result (dft data)) + (rec-result (cooley-tukey-rec data)) + (iter-result (cooley-tukey-iter data))) + (format T "~&DFT and recursive Cooley-Tukey approx. 
equal: ~a" + (approx-eql dft-result rec-result)) + (format T "~&DFT and iterative Cooley-Tukey approx. equal: ~a" + (approx-eql dft-result iter-result)) + (format T "~&Recursive Cooley-Tukey and iterative Cooley-Tukey approx. equal: ~a" + (approx-eql rec-result iter-result)))) + +(test-fft '(0.0 0.25 0.5 0.75 0.0 -0.25 -0.5 -0.75)) diff --git a/contents/cooley_tukey/code/c++/fft.cpp b/contents/cooley_tukey/code/cpp/fft.cpp similarity index 95% rename from contents/cooley_tukey/code/c++/fft.cpp rename to contents/cooley_tukey/code/cpp/fft.cpp index 5d4772f10..d4697d1df 100644 --- a/contents/cooley_tukey/code/c++/fft.cpp +++ b/contents/cooley_tukey/code/cpp/fft.cpp @@ -55,7 +55,7 @@ void cooley_tukey(Iter first, Iter last) { // now combine each of those halves with the butterflies for (int k = 0; k < size / 2; ++k) { - auto w = std::exp(complex(0, -2.0 * pi * k / size)); + auto w = std::exp(complex(0, -2.0 * pi * k / static_cast(size))); auto& bottom = first[k]; auto& top = first[k + size / 2]; @@ -78,7 +78,7 @@ void sort_by_bit_reverse(Iter first, Iter last) { b = (((b & 0xcccccccc) >> 2) | ((b & 0x33333333) << 2)); b = (((b & 0xf0f0f0f0) >> 4) | ((b & 0x0f0f0f0f) << 4)); b = (((b & 0xff00ff00) >> 8) | ((b & 0x00ff00ff) << 8)); - b = ((b >> 16) | (b << 16)) >> (32 - std::uint32_t(log2(size))); + b = ((b >> 16) | (b << 16)) >> (32 - std::uint32_t(log2(static_cast(size)))); if (b > i) { swap(first[b], first[i]); } diff --git a/contents/cooley_tukey/code/python/fft.py b/contents/cooley_tukey/code/python/fft.py index e7206e35f..6f4fe3735 100644 --- a/contents/cooley_tukey/code/python/fft.py +++ b/contents/cooley_tukey/code/python/fft.py @@ -2,56 +2,62 @@ from cmath import exp, pi from math import log2 + def dft(X): N = len(X) - temp = [0]*N + temp = [0] * N for i in range(N): for k in range(N): - temp[i] += X[k] * exp(-2.0j*pi*i*k/N) + temp[i] += X[k] * exp(-2.0j * pi * i * k / N) return temp + def cooley_tukey(X): - N = len(X) - if N <= 1: - return X - even = 
cooley_tukey(X[0::2]) - odd = cooley_tukey(X[1::2]) - - temp = [i for i in range(N)] - for k in range(N//2): - temp[k] = even[k] + exp(-2j*pi*k/N) * odd[k] - temp[k+N//2] = even[k] - exp(-2j*pi*k/N)*odd[k] - return temp - -def bitReverse(X): - N = len(X) - temp = [i for i in range(N)] - for k in range(N): - b = sum(1<<(int(log2(N))-1-i) for i in range(int(log2(N))) if k>>i&1) - temp[k] = X[b] - temp[b] = X[k] - return temp + N = len(X) + if N <= 1: + return X + even = cooley_tukey(X[0::2]) + odd = cooley_tukey(X[1::2]) + + temp = [i for i in range(N)] + for k in range(N // 2): + temp[k] = even[k] + exp(-2.0j * pi * k / N) * odd[k] + temp[k + N // 2] = even[k] - exp(-2.0j * pi * k / N) * odd[k] + return temp + + +def bit_reverse(X): + N = len(X) + temp = [i for i in range(N)] + for k in range(N): + b = sum(1 << int(log2(N)) - 1 - + i for i in range(int(log2(N))) if k >> i & 1) + temp[k] = X[b] + temp[b] = X[k] + return temp + def iterative_cooley_tukey(X): - N = len(X) + N = len(X) + + X = bit_reverse(X) - X = bitReverse(X) + for i in range(1, int(log2(N)) + 1): + stride = 2 ** i + w = exp(-2.0j * pi / stride) + for j in range(0, N, stride): + v = 1 + for k in range(stride // 2): + X[k + j + stride // 2] = X[k + j] - v * X[k + j + stride // 2] + X[k + j] -= X[k + j + stride // 2] - X[k + j] + v *= w + return X - for i in range(1, int(log2(N)) + 1): - stride = 2**i - w = exp(-2j*pi/stride) - for j in range(0, N, stride): - v = 1 - for k in range(stride//2): - X[k + j + stride//2] = X[k + j] - v*X[k + j + stride//2]; - X[k + j] -= (X[k + j + stride//2] - X[k + j]); - v *= w; - return X X = [] for i in range(64): - X.append(random()) + X.append(random()) Y = cooley_tukey(X) Z = iterative_cooley_tukey(X) diff --git a/contents/cooley_tukey/code/rust/Cargo.toml b/contents/cooley_tukey/code/rust/Cargo.toml new file mode 100644 index 000000000..0cba5179d --- /dev/null +++ b/contents/cooley_tukey/code/rust/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "rust" +version = 
"0.1.0" +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +rand = "0.7.3" +rustfft = "4.1.0" + +[[bin]] +path = "./fft.rs" +name = "main" \ No newline at end of file diff --git a/contents/cooley_tukey/code/rust/fft.rs b/contents/cooley_tukey/code/rust/fft.rs new file mode 100644 index 000000000..01bc8bab4 --- /dev/null +++ b/contents/cooley_tukey/code/rust/fft.rs @@ -0,0 +1,120 @@ +extern crate rand; +extern crate rustfft; + +use rand::prelude::*; +use rustfft::num_complex::Complex; +use rustfft::FFTplanner; +use std::f64::consts::PI; + +// This is based on the Python and C implementations. + +fn fft(x: &[Complex]) -> Vec> { + let n = x.len(); + let mut new_x = x.to_vec(); + let mut y = vec![Complex::new(0.0_f64, 0.0_f64); n]; + + let mut planner = FFTplanner::new(false); + let this_fft = planner.plan_fft(n); + this_fft.process(new_x.as_mut_slice(), y.as_mut_slice()); + + // y.into_iter().map(|i| i / (n as f64).sqrt()).collect() + y +} + +fn dft(x: &[Complex]) -> Vec> { + let n = x.len(); + (0..n) + .map(|i| { + (0..n) + .map(|k| { + x[k] * (Complex::new(0.0_f64, -2.0_f64) * PI * (i as f64) * (k as f64) + / (n as f64)) + .exp() + }) + .sum() + }) + .collect() +} + +fn cooley_tukey(x: &[Complex]) -> Vec> { + let n = x.len(); + if n <= 1 { + return x.to_owned(); + } + let even = cooley_tukey(&x.iter().step_by(2).cloned().collect::>()); + let odd = cooley_tukey(&x.iter().skip(1).step_by(2).cloned().collect::>()); + + let mut temp = vec![Complex::new(0.0_f64, 0.0_f64); n]; + for k in 0..(n / 2) { + temp[k] = even[k] + + (Complex::new(0.0_f64, -2.0_f64) * PI * (k as f64) / (n as f64)).exp() * odd[k]; + temp[k + n / 2] = even[k] + - (Complex::new(0.0_f64, -2.0_f64) * PI * (k as f64) / (n as f64)).exp() * odd[k]; + } + temp +} + +fn bit_reverse(x: &[Complex]) -> Vec> { + let n = x.len(); + let mut temp = vec![Complex::new(0.0_f64, 0.0_f64); n]; + for k in 0..n { + let b: usize = (0..((n 
as f64).log2() as usize)) + .filter(|i| k >> i & 1 != 0) + .map(|i| 1 << ((((n as f64).log2()) as usize) - 1 - i)) + .sum(); + temp[k] = x[b]; + temp[b] = x[k]; + } + temp +} + +fn iterative_cooley_tukey(x: &[Complex]) -> Vec> { + let n = x.len(); + + let mut new_x = bit_reverse(x); + + for i in 1..=((n as f64).log2() as usize) { + let stride = 2_u128.pow(i as u32); + let w = (Complex::new(0.0_f64, -2.0_f64) * PI / (stride as f64)).exp(); + for j in (0..n).step_by(stride as usize) { + let mut v = Complex::new(1.0_f64, 0.0_f64); + for k in 0..((stride / 2) as usize) { + new_x[k + j + ((stride / 2) as usize)] = + new_x[k + j] - v * new_x[k + j + ((stride / 2) as usize)]; + new_x[k + j] = + new_x[k + j] - (new_x[k + j + ((stride / 2) as usize)] - new_x[k + j]); + v *= w; + } + } + } + + new_x +} + +fn main() { + let mut x = Vec::with_capacity(64); + let mut rng = thread_rng(); + for _i in 0..64 { + let real = rng.gen_range(0.0_f64, 1.0_f64); + x.push(Complex::new(real, 0.0_f64)); + } + let v = fft(&x); + let y = cooley_tukey(&x); + let z = iterative_cooley_tukey(&x); + let t = dft(&x); + + println!( + "{}", + v.iter().zip(y.iter()).all(|i| (i.0 - i.1).norm() < 1.0) + ); + println!( + "{}", + v.iter().zip(z.iter()).all(|i| (i.0 - i.1).norm() < 1.0) + ); + println!( + "{}", + v.iter() + .zip(t.into_iter()) + .all(|i| (i.0 - i.1).norm() < 1.0) + ); +} diff --git a/contents/cooley_tukey/cooley_tukey.md b/contents/cooley_tukey/cooley_tukey.md index 1b7066063..d528832d1 100644 --- a/contents/cooley_tukey/cooley_tukey.md +++ b/contents/cooley_tukey/cooley_tukey.md @@ -5,7 +5,7 @@ On the surface, the algorithm seems like a simple application of recursion, and From calculating superfluid vortex positions to super-resolution imaging, Fourier Transforms lay at the heart of many scientific disciplines and are essential to many algorithms we will cover later in this book. 
Simply put, the Fourier Transform is a beautiful application of complex number systems; however, it would rarely be used today if not for the ability to quickly perform the operation with Fast Fourier Transform, first introduced by the great Frederick Gauss in 1805 and later independently discovered by James Cooley and John Tukey in 1965 {{ "ct1965" | cite }}. -Gauss (of course) already had too many things named after him and Cooley and Tukey both had cooler names, so the most common algorithm for FFT's today is known as the Cooley-Tukey algorithm. +Gauss (of course) already had too many things named after him and Cooley and Tukey both had cooler names, so the most common algorithm for FFTs today is known as the Cooley-Tukey algorithm. ### What is a Fourier Transform? @@ -29,7 +29,7 @@ Each constituent wave can be described by only one value: $$\omega$$. So, instead of representing these curves as seen above, we could instead describe them as peaks in frequency space, as shown below.

- +

This is what the Fourier Transform does! @@ -76,17 +76,19 @@ For some reason, though, putting code to this transformation really helped me fi {% sample lang="clj" %} [import:15-30, lang:"clojure"](code/clojure/fft.clj) {% sample lang="cpp" %} -[import:23-33, lang:"cpp"](code/c++/fft.cpp) +[import:23-33, lang:"cpp"](code/cpp/fft.cpp) {% sample lang="hs" %} [import:7-13, lang:"haskell"](code/haskell/fft.hs) {% sample lang="py" %} -[import:5-11, lang:"python"](code/python/fft.py) -{% sample lang="scratch" %} -[import:4-13, lang:"julia"](code/julia/fft.jl) +[import:6-12, lang:"python"](code/python/fft.py) {% sample lang="asm-x64" %} [import:15-74, lang:"asm-x64"](code/asm-x64/fft.s) {% sample lang="js" %} [import:3-15, lang:"javascript"](code/javascript/fft.js) +{% sample lang="rs" %} +[import:24-37, lang:"rust"](code/rust/fft.rs) +{% sample lang="lisp" %} +[import:2-12, lang:"lisp"](code/clisp/fft.lisp) {% endmethod %} In this function, we define `n` to be a set of integers from $$0 \rightarrow N-1$$ and arrange them to be a column. @@ -111,7 +113,7 @@ Recursion! The problem with using a standard DFT is that it requires a large matrix multiplications and sums over all elements, which are prohibitively complex operations. The Cooley-Tukey algorithm calculates the DFT directly with fewer summations and without matrix multiplications. -If necessary, DFT's can still be calculated directly at the early stages of the FFT calculation. +If necessary, DFTs can still be calculated directly at the early stages of the FFT calculation. The trick to the Cooley-Tukey algorithm is recursion. In particular, we split the matrix we wish to perform the FFT on into two parts: one for all elements with even indices and another for all odd indices. We then proceed to split the array again and again until we have a manageable array size to perform a DFT (or similar FFT) on. 
@@ -127,17 +129,19 @@ In the end, the code looks like: {% sample lang="clj" %} [import:31-58, lang:"clojure"](code/clojure/fft.clj) {% sample lang="cpp" %} -[import:36-66, lang:"cpp"](code/c++/fft.cpp) +[import:36-66, lang:"cpp"](code/cpp/fft.cpp) {% sample lang="hs" %} [import:15-28, lang:"haskell"](code/haskell/fft.hs) {% sample lang="py" %} -[import:13-24, lang:"python"](code/python/fft.py) -{% sample lang="scratch" %} -[import:16-32, lang:"julia"](code/julia/fft.jl) +[import:15-26, lang:"python"](code/python/fft.py) {% sample lang="asm-x64" %} [import:76-165, lang:"asm-x64"](code/asm-x64/fft.s) {% sample lang="js" %} [import:17-39, lang="javascript"](code/javascript/fft.js) +{% sample lang="rs" %} +[import:39-55, lang:"rust"](code/rust/fft.rs) +{% sample lang="lisp" %} +[import:14-42, lang:"lisp"](code/clisp/fft.lisp) {% endmethod %} As a side note, we are enforcing that the array must be a power of 2 for the operation to work. @@ -163,7 +167,7 @@ For example, imagine we need to perform an FFT of an array of only 2 elements. We can represent this addition with the following (radix-2) butterfly:

- +

Here, the diagram means the following: @@ -177,7 +181,7 @@ $$ However, it turns out that the second half of our array of $$\omega$$ values is always the negative of the first half, so $$\omega_2^0 = -\omega_2^1$$, so we can use the following butterfly diagram:

- +

With the following equations: @@ -194,7 +198,7 @@ In this case, we start with simple butterflies, as shown above, and then sum but For example, if we have 8 elements, this might look like this:

- +

Note that we can perform a DFT directly before using any butterflies, if we so desire, but we need to be careful with how we shuffle our array if that's the case. @@ -203,7 +207,7 @@ In the code snippet provided in the previous section, the subdivision was perfor For example, take a look at the ordering of FFT ([found on wikipedia](https://en.wikipedia.org/wiki/Butterfly_diagram)) that performs the DFT shortcut:

- +

Here, the ordering of the array was simply divided into even and odd elements once, but they did not recursively divide the arrays of even and odd elements again because they knew they would perform a DFT soon thereafter. @@ -225,7 +229,7 @@ To be clear, the example code this time will be complicated and requires the fol As mentioned in the text, the Cooley-Tukey algorithm may be implemented either recursively or non-recursively, with the recursive method being much easier to implement. I would ask that you implement either the recursive or non-recursive methods (or both, if you feel so inclined). If the language you want to write your implementation in is already used, please append your code to the already existing codebase. -As before, pull requests are favoured. +As before, pull requests are favored. Note: I implemented this in Julia because the code seems more straightforward in Julia; however, if you wish to write better Julia code or better code in your own language, please feel free to do so! 
**I do not claim that this is the most efficient way to implement the Cooley-Tukey method, so if you have a better way to do it, feel free to implement it that way!** @@ -238,17 +242,19 @@ Note: I implemented this in Julia because the code seems more straightforward in {% sample lang="clj" %} [import, lang:"clojure"](code/clojure/fft.clj) {% sample lang="cpp" %} -[import, lang:"cpp"](code/c++/fft.cpp) +[import, lang:"cpp"](code/cpp/fft.cpp) {% sample lang="hs" %} [import, lang:"haskell"](code/haskell/fft.hs) {% sample lang="py" %} [import, lang:"python"](code/python/fft.py) -{% sample lang="scratch" %} -Some rather impressive scratch code was submitted by Jie and can be found here: https://scratch.mit.edu/projects/37759604/#editor {% sample lang="asm-x64" %} [import, lang:"asm-x64"](code/asm-x64/fft.s) {% sample lang="js" %} [import, lang:"javascript"](code/javascript/fft.js) +{% sample lang="rs" %} +[import, lang:"rust"](code/rust/fft.rs) +{% sample lang="lisp" %} +[import, lang:"lisp"](code/clisp/fft.lisp) {% endmethod %} + +## License +The text of this chapter was written by [Liikt](https://github.com/Liikt) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +The code examples are licensed under the MIT license (found in LICENSE.md). + +##### Code Examples + +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). + +##### Text + +The text of this chapter was written by [Liikt](https://github.com/Liikt) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +[

](https://creativecommons.org/licenses/by-sa/4.0/) diff --git a/contents/cryptography/res/table.jl b/contents/cryptography/res/table.jl new file mode 100644 index 000000000..314c238c4 --- /dev/null +++ b/contents/cryptography/res/table.jl @@ -0,0 +1,33 @@ +function print_table(a::Array{T, 2}, + header::Vector{String}) where T <: Union{Char, String} + print(" | ") + for i = 1:length(header) + print(string(header[i]), " | ") + end + println() + + print(" | ") + for i = 1:length(header) + print("---", " | ") + end + println() + + for i = 1:size(a)[1] + print(" | ") + for j = 1:size(a)[2] + print(string(a[i,j]), " | ") + end + println() + end +end + +alphabet = [char for char = 'a':'z'] +offsets = Int.([0, 0, 2, 14, 18, 21, 24]) +alphabet_array = Array{Char}(undef, 26, length(offsets)) + +for i = 1:length(offsets) + alphabet_array[:,i] = vcat(alphabet[offsets[i]+1:26],alphabet[1:offsets[i]]) +end + +header = vcat(string.(offsets)) +print_table(alphabet_array, header) diff --git a/contents/data_compression/data_compression.md b/contents/data_compression/data_compression.md index 1f48c0c99..5a04c3706 100644 --- a/contents/data_compression/data_compression.md +++ b/contents/data_compression/data_compression.md @@ -130,7 +130,7 @@ MathJax.Hub.Queue(["Typeset",MathJax.Hub]); ##### Code Examples -The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). 
##### Text diff --git a/contents/data_structures/data_structures.md b/contents/data_structures/data_structures.md index 11ae7aa4b..60680b95e 100644 --- a/contents/data_structures/data_structures.md +++ b/contents/data_structures/data_structures.md @@ -7,7 +7,7 @@ The fundamental building blocks of algorithms are data structures, and thus as m ##### Code Examples -The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). ##### Text diff --git a/contents/decision_problems/decision_problems.md b/contents/decision_problems/decision_problems.md index 064c1d067..2ab9ed612 100644 --- a/contents/decision_problems/decision_problems.md +++ b/contents/decision_problems/decision_problems.md @@ -14,7 +14,7 @@ MathJax.Hub.Queue(["Typeset",MathJax.Hub]); ##### Code Examples -The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). ##### Text diff --git a/contents/differential_equations/differential_equations.md b/contents/differential_equations/differential_equations.md index 3cfbba1fa..0bb08349c 100644 --- a/contents/differential_equations/differential_equations.md +++ b/contents/differential_equations/differential_equations.md @@ -7,7 +7,7 @@ Here, we discuss many different methods to solve particular sets of differential ##### Code Examples -The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). 
+The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). ##### Text diff --git a/contents/domain_coloring/code/gnuplot/domain_coloring.gp b/contents/domain_coloring/code/gnuplot/domain_coloring.gp new file mode 100644 index 000000000..525de4abb --- /dev/null +++ b/contents/domain_coloring/code/gnuplot/domain_coloring.gp @@ -0,0 +1,81 @@ +# setting output to file of size 800 x 800 +set terminal pngcairo size 1000, 1000 +set output 'domain.png' + +# sets title for full plot +set title 'f(z)=z^2' + +# removes legend +unset key + +# projects image onto 2D plane +set view map + +# sets aspect ratio of plot to be square +set size square + +# sets x and y range and labels +set xrange[-2:2] +set yrange[-2:2] + +set xlabel "Re(z)" +set ylabel "Im(z)" + +# scaling the x, y, and colorbar tics to zero so they are not seen in the plot +set xtics border scale 0,0 +set ytics border scale 0,0 +set cbtics border scale 0,0 + +# sets tics in color bar at 0 and 2pi +set cbtics ("0" -3.14159, '2pi' 3.14159) + +set cblabel "Phase Angle" +set cbrange [ -3.14159 : 3.14159 ] + +# use hsv for colorbar and set palette to use full hsv space +set palette model HSV +set palette defined ( 0 0 1 1, 1 1 1 1 ) + +# setting isosamples for output grid and samples for input grid +set isosamples 2000, 2000 +set samples 2000, 2000 + +# setting functions necessary for domain coloring +# setting threshold for gridlines. 
Smaller threshold will make smaller lines +thresh = 0.1 +f(z) = z**2 + +# atan2 returns a range from -pi to pi, so we need to add pi, but this offsets +# the value by 180 degrees, so we also input (-y, -x) for another 180 degrees +# to invert rotation +angle(x,y) = (pi + atan2(-y,-x)) / (2*pi) + +# complex magnitude +r(x,y) = sqrt(x*x + y*y) + +# complex phase and magnitude +theta(x,y) = atan2(y,x) +z(x,y) = r(x,y)*exp(theta(x,y)*sqrt(-1)) + +# imaginary and real output functions +imaginary_f(z) = imag(f(z)) +real_f(z) = real(f(z)) + +# magnitude contours +magnitude_shading(x,y) = 0.5 + 0.5*(abs(f(z(x,y)))-floor(abs(f(z(x,y))))) + +# gridlines +gridlines(x,y) = (abs(sin(real_f(z(x,y))*pi)**thresh) \ + * abs(sin(imaginary_f(z(x,y))*pi))**thresh) + +# overall coloring function +color(x,y) = hsv2rgb(angle(real_f(z(x,y)), imaginary_f(z(x,y))), \ + magnitude_shading(x,y), \ + gridlines(x,y)) + +save_encoding = "utf8" + +# Last datafile plotted: "++" +# In this case, it means, "plot the data file created with the +# samples and isosamples" +splot '++' using 1:2:(color($1,$2)) with pm3d lc rgb variable nocontour diff --git a/contents/domain_coloring/code/python/domain_coloring.py b/contents/domain_coloring/code/python/domain_coloring.py new file mode 100644 index 000000000..cb8be03f5 --- /dev/null +++ b/contents/domain_coloring/code/python/domain_coloring.py @@ -0,0 +1,83 @@ + +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.colors +from matplotlib.cm import ScalarMappable + + +def f(z): + return z**2 + + +def magnitude_shading(f_val): + f_val_abs = np.abs(f_val) + return 0.5 + 0.5 * (f_val_abs - np.floor(f_val_abs)) + + +def gridlines(f_val, threshold): + return (np.abs(np.sin(np.pi * np.real(f_val))) ** threshold + * np.abs(np.sin(np.pi * np.imag(f_val))) ** threshold) + + +def color(f_val, threshold): + hue = (np.pi - np.angle(f_val)) / (2.0 * np.pi) + saturation = magnitude_shading(f_val) + value = gridlines(f_val, threshold) + + # Currently we 
have a tuple of 2D-arrays (hue, saturation, value). + # This makes it a 2D-array of tuples, which the conversion function requires. + hsv = np.moveaxis((hue, saturation, value), 0, -1) + return matplotlib.colors.hsv_to_rgb(hsv) + + +if __name__ == "__main__": + # Create a new figure containing a single plot + fig, axes = plt.subplots(1, 1) + + # Set the title for the plot + axes.set_title("$f(x)=z^2$") + + # Create color bar + cbar = fig.colorbar( + ScalarMappable(matplotlib.colors.Normalize(0.0, 2.0 * np.pi), "hsv"), + ax=axes, + label="Phase Angle") + + # Set x and y labels + axes.set_xlabel("$Re(z)$") + axes.set_ylabel("$Im(z)$") + + # Set color bar tick locations and labels + cbar.set_ticks([0.0, np.pi, 2.0 * np.pi]) + cbar.set_ticklabels(["$0.0$", "$\pi$", "$2\pi$"]) + + # Hide x and y ticks + for tick in axes.get_xticklines(): + tick.set_visible(False) + + for tick in axes.get_yticklines(): + tick.set_visible(False) + + # Create a 500x500 input grid + coords = np.linspace(-2.0, 2.0, 500) + z_real, z_imag = np.meshgrid(coords, coords) + z = z_real + 1j * z_imag + + # Calculate function values + f_val = f(z) + + # Map function values to colors + colors = color(f_val, 0.1) + + # Plot the colors + # extent=(-2.0, 2.0, -2.0, 2.0) sets the x and y ranges + # origin="lower" places index (0,0) of the color array in the lower-left corner + # aspect="equal" ensures that the plot is square + axes.imshow( + colors, + extent=(-2.0, 2.0, -2.0, 2.0), + origin="lower", + aspect="equal") + + # Save output + fig.savefig("domain.png") diff --git a/contents/domain_coloring/domain_coloring.md b/contents/domain_coloring/domain_coloring.md new file mode 100644 index 000000000..746c52241 --- /dev/null +++ b/contents/domain_coloring/domain_coloring.md @@ -0,0 +1,217 @@ +# Domain coloring + +Domain coloring is a much more complicated plotting technique than those outlined in the [plotting chapter](../plotting/plotting.md) and is used to plot complex functions where both the input and 
output have imaginary and real components. +For the code in this chapter, we will focus on languages that are easily able to plot two-dimensional images or heat maps, instead of languages meant for number-crunching. +That is to say that this chapter will certainly have a code implementation in gnuplot, but it will not likely have an implementation in C, Fortran, or Java because these languages do not have plotting capabilities in-built. + +To start, imagine the following function: $$f(z) = z^2$$. +In this case, we could create a plot that looks like this: + +

+ +

+ +This indicates that for various input values along $$z$$, we have different function outputs from $$f(z)$$. +For this function, $$z\in\mathbb{R}$$ is purely in real space and because of this, the output is also in real space. +Now let's imagine another function with complex input $$(z \in \mathbb{C})$$, but a purely real output $$(f(z) \in \mathbb{R})$$: + +$$ +f(z) = |z| +$$ + +In this case, this can be plotted as a two-dimensional dataset like so: + +

+ +

+ +Here, the $$x$$-axis and $$y$$-axis represent the imaginary and real components of the input variable, respectively. +The color bar represents the output of $$f(z)$$. + +At this point, we can start to see the problem. +If the output of $$f(z)$$ also requires plotting of real and imaginary components, then we would need four dimensions to appropriately represent the full function space, one axis for the real component and another for the imaginary component of both the input ($$z$$) and the output of $$f(z)$$! +Unfortunately, feeble human minds are incapable of understanding four spatial dimensions without projecting onto lower dimensionality, so we need to improvise. + +We do this by assuming the complex output can be represented in the following form: + +$$ +z = re^{i \theta} = r(\cos(\theta) + i\sin(\theta)) +$$ + +where, $$r$$ is a complex magnitude and $$\theta$$ is a complex phase. +This is the formula for a circle in the complex plane and we can easily find $$r$$ and $$\theta$$ like so: + +$$ +\begin{align} + r &= \sqrt{\text{Re}(z)^2 + \text{Im}(z)^2} \\ + \theta &= \text{atan}\left(\frac{\text{Im}(z)}{\text{Re}(z)}\right) +\end{align} +$$ + +Once we have our complex function output in this form, we then color the output domain according to a color space with at least 2 independent dimensions, like RGB (Red, Green, Blue), or HSV (Hue, Saturation, Value) {{ "hsv" | cite }}. +The choice of color space is completely dependent on what the users feel is most visually intuitive. +In any case, one dimension of the color system will be used to represent the complex magnitude and another dimension of the color system will be used to represent the complex phase of the output. +The $$xy$$ grid will be representing the real and imaginary inputs to these functions. +That is to say, we plug every value in the 2D complex plane into the function and then color each pixel based on the function output. 
+ +As an example, let's look at the simplest function we can imagine, $$f(z) = z$$, but in this case $$z \in \mathbb{C}$$. +If we use an RGB color scheme, where red represents $$\theta$$ and blue represents $$r$$, we can generate the following image: + +

+ +

+ +As a note here, there is a clear phase discontinuity along the horizontal axis, which is a consequence of the fact that +the complex phase wraps around the origin, ranging from 0 (clear) to $$2\pi$$ (red). +In addition, the edges of the plot are blue because the function's magnitude increases linearly as we move from the origin. + +If we instead look at the function $$f(z) = z^2$$, we can generate a similar plot: + +

+ +

+ +Here, it is clear that the complex phase wraps around the origin twice, creating two separate phase discontinuities on top of each other. +This indicates a $$4\pi$$ phase winding. +For some purposes, such as vortex tracking for inviscid fluids, this visualization is ideal, because a vortex is located precisely at the center of the phase discontinuity {{ "schloss2019" | cite }} {{ "pethick2008" | cite }}. +For other purposes, the discontinuity is visually distracting, and for this reason, many people use an HSV scheme for plotting complex functions {{ "wegert2012" | cite }} {{ "poelkedomain" | cite }} {{ "lundmark2004" | cite }}. +So here is the same function $$\left(f(z)=z^2\right)$$, but using hue to represent the complex phase and saturation to represent the magnitude: + +

+ +

+ +In this plot, the Value for HSV was always set to 1. +When looking at the edges of the plot, the hue changes rapidly, but each color is mirrored on the opposite edge. +This indicates the $$4\pi$$ phase winding we saw in the RGB plot. +Also, because the complex magnitude increases as we move further from the center of the plot, the saturation also increases. +Thus the center of the plot is completely washed out! +We need to fix this in subsequent plots to make them more representative of the actual data. + +One easy way to show the increasing complex magnitude without sacrificing phase information is by using contours. +Essentially, at every integer value of the magnitude, we want to draw some kind of line. +There are a number of ways to generate these lines, and one simple way is by using an alternative shading function like so: + +$$ +g(r) = r-\lfloor r \rfloor. +$$ + +This will create the following image: + +

+ +

+ +This function will essentially create a smooth gradient, but because of the floor operation $$\left(\lfloor \cdot \rfloor \right)$$, the saturation will go from 0 to 1 between each integer value of the magnitude. +Here, it is clear that the magnitude is increasing as $$z^2$$ from the origin; however, because the saturation is fluctuating so much, it is difficult to see the phase pattern next to each contour. +This can be fixed simply by adding an offset to the shading function such that, + +$$ +g(r) = \frac{1}{2} + \frac{1}{2}\left(r-\lfloor r \rfloor \right). +$$ + +Which will produce the following image: + +

+ +

+ +This means that the saturation will fluctuate from $$\frac12$$ to 1 instead of from 0 to 1, which makes it way easier to see phase information next to contours. +Again, there are a lot of different ways to play with these equations, so feel free to use whatever function you want! +As long as some sort of rounding operation is used to establish some form of integer value for the magnitude, it should be possible to create contours of various types. + +At this point, changing the saturation shows changes in the complex magnitude, and changing the hue shows changes in the complex phase. +Unfortunately, neither the magnitude nor the phase directly show what is happening in real or imaginary space with the output. +To show this, we might want to draw grid lines that color our pixels black whenever the imaginary or real components of the output function are integer values. + +For example, let's go back to a simpler function $$f(z) = z$$. +If we draw lines on this plot, corresponding to integer values in the output, we get a simple grid: + +

+ +

+ +Like before, the choice of which function to use in order to create the grid lines is somewhat arbitrary. +It is important to choose a function that sharply drops to 0 or peaks at 1 for all integer values, and then we simply plug values of $$f(z)$$ into this function. +For the purposes of this chapter, we chose the following function + +$$ +h(z) = |\sin(\pi\times\text{Re}(f(z)))^t|\times|\sin(\pi\times\text{Im}(f(z)))^t|, +$$ + +where $$t$$ is some threshold value, and was set to be 0.1 in our plot. +A plot of $$h(z)$$ for $$f(z) = z$$ where $$z\in\mathbb{R}$$ is shown below: + +

+ +

+ +So, putting it all together and returning to the function of $$f(z) = z^2$$, we find the following image. + +

+ +

+ +Here, the diagonal lines through the center represent integer values along the imaginary axis for $$f(z)$$ and the vertical and horizontal lines represent integer values of the real axis for $$f(z)$$. +An easy way to determine which lines correspond to which integer values is by plugging in certain values for $$z$$ into $$f(z)$$. +For example, there is a black line at $$z = 1 + 1i$$ where $$f(z) = 2i$$, this means that all values along that contour correspond to values that are constrained to having an imaginary component of precisely 2. + +Overall, there are plenty of interesting ways to plot complex functions and make really compelling and beautiful images! +We will be using domain coloring in other contexts throughout this text when describing methods that heavily use complex space. + +## Video Explanation + +Here is a video describing domain coloring: + +
+ +
+ +## Example Code + +Here is the full script to generate a domain colored output of $$f(z)=z^2$$. + +{% method %} +{% sample lang="gnuplot" %} +[import, lang:"gnuplot"](code/gnuplot/domain_coloring.gp) +{% sample lang="python & matplotlib" %} +[import, lang:"python"](code/python/domain_coloring.py) +{% endmethod %} + +### Bibliography + +{% references %} {% endreferences %} + + + +## License + +##### Code Examples + +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). + +##### Text + +The text of this chapter was written by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +[

](https://creativecommons.org/licenses/by-sa/4.0/) + +##### Images/Graphics + +- The image "[z2](res/z2.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[absz](res/absz.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[rgb1](res/rgb1.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[rgb2](res/rgb2.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[hsv1](res/hsv.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[hsv2](res/hsv2.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[hsv3](res/hsv3.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). 
+- The image "[hsv4](res/hsv4.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[shade](res/shade.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[hsv5](res/hsv5.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +##### Pull Requests + +The following pull requests have modified the text or graphics of this chapter: +- none diff --git a/contents/domain_coloring/res/absz.png b/contents/domain_coloring/res/absz.png new file mode 100644 index 000000000..9c2715043 Binary files /dev/null and b/contents/domain_coloring/res/absz.png differ diff --git a/contents/domain_coloring/res/hsv.png b/contents/domain_coloring/res/hsv.png new file mode 100644 index 000000000..8f994940c Binary files /dev/null and b/contents/domain_coloring/res/hsv.png differ diff --git a/contents/domain_coloring/res/hsv2.png b/contents/domain_coloring/res/hsv2.png new file mode 100644 index 000000000..b4672f542 Binary files /dev/null and b/contents/domain_coloring/res/hsv2.png differ diff --git a/contents/domain_coloring/res/hsv3.png b/contents/domain_coloring/res/hsv3.png new file mode 100644 index 000000000..cfca1081c Binary files /dev/null and b/contents/domain_coloring/res/hsv3.png differ diff --git a/contents/domain_coloring/res/hsv4.png b/contents/domain_coloring/res/hsv4.png new file mode 100644 index 000000000..bf97be4a5 Binary files /dev/null and b/contents/domain_coloring/res/hsv4.png differ diff --git a/contents/domain_coloring/res/hsv5.png 
b/contents/domain_coloring/res/hsv5.png new file mode 100644 index 000000000..13a01fd1d Binary files /dev/null and b/contents/domain_coloring/res/hsv5.png differ diff --git a/contents/domain_coloring/res/rgb1.png b/contents/domain_coloring/res/rgb1.png new file mode 100644 index 000000000..d3216e41c Binary files /dev/null and b/contents/domain_coloring/res/rgb1.png differ diff --git a/contents/domain_coloring/res/rgb2.png b/contents/domain_coloring/res/rgb2.png new file mode 100644 index 000000000..1281fc605 Binary files /dev/null and b/contents/domain_coloring/res/rgb2.png differ diff --git a/contents/domain_coloring/res/shade.png b/contents/domain_coloring/res/shade.png new file mode 100644 index 000000000..b0b97c788 Binary files /dev/null and b/contents/domain_coloring/res/shade.png differ diff --git a/contents/domain_coloring/res/z2.png b/contents/domain_coloring/res/z2.png new file mode 100644 index 000000000..12efeb2fb Binary files /dev/null and b/contents/domain_coloring/res/z2.png differ diff --git a/contents/euclidean_algorithm/code/asm-x64/euclidean_example.s b/contents/euclidean_algorithm/code/asm-x64/euclidean_example.s index c08bb0878..53a008908 100644 --- a/contents/euclidean_algorithm/code/asm-x64/euclidean_example.s +++ b/contents/euclidean_algorithm/code/asm-x64/euclidean_example.s @@ -1,7 +1,8 @@ .intel_syntax noprefix .section .rodata - fmt: .string "%d\n" + euclid_mod_fmt: .string "[#]\nModulus-based euclidean algorithm result:\n%d\n" + euclid_sub_fmt: .string "[#]\nSubtraction-based euclidean algorithm result:\n%d\n" .section .text .global main @@ -59,14 +60,14 @@ main: mov rdi, 4288 # Call euclid_mod mov rsi, 5184 call euclid_mod - mov rdi, OFFSET fmt # Print output + mov rdi, OFFSET euclid_mod_fmt # Print output mov rsi, rax xor rax, rax call printf mov rdi, 1536 # Call euclid_sub mov rsi, 9856 call euclid_sub - mov rdi, OFFSET fmt # Print output + mov rdi, OFFSET euclid_sub_fmt # Print output mov rsi, rax xor rax, rax call printf diff --git 
a/contents/euclidean_algorithm/code/bash/euclid.bash b/contents/euclidean_algorithm/code/bash/euclid.bash index bef4b5da0..0ddd49537 100755 --- a/contents/euclidean_algorithm/code/bash/euclid.bash +++ b/contents/euclidean_algorithm/code/bash/euclid.bash @@ -38,6 +38,6 @@ euclid_sub() { } result=$(euclid_mod $((64 * 67)) $((64 * 81))) -echo "$result" +echo -e "[#]\nModulus-based euclidean algorithm result:\n$result" result=$(euclid_sub $((128 * 12)) $((128 * 77))) -echo "$result" +echo -e "[#]\nSubtraction-based euclidean algorithm result:\n$result" diff --git a/contents/euclidean_algorithm/code/c/euclidean_example.c b/contents/euclidean_algorithm/code/c/euclidean_example.c index 16b0ce9ea..d1c0b69d1 100644 --- a/contents/euclidean_algorithm/code/c/euclidean_example.c +++ b/contents/euclidean_algorithm/code/c/euclidean_example.c @@ -1,40 +1,40 @@ #include -#include +#include int euclid_mod(int a, int b) { - a = abs(a); - b = abs(b); + a = abs(a); + b = abs(b); - while (b != 0) { - int temp = b; - b = a % b; - a = temp; - } + while (b != 0) { + int temp = b; + b = a % b; + a = temp; + } - return a; + return a; } int euclid_sub(int a, int b) { - a = abs(a); - b = abs(b); - - while (a != b) { - if (a > b) { - a -= b; - } else { - b -= a; - } + a = abs(a); + b = abs(b); + + while (a != b) { + if (a > b) { + a -= b; + } else { + b -= a; } + } - return a; + return a; } int main() { - int check1 = euclid_mod(64 * 67, 64 * 81); - int check2 = euclid_sub(128 * 12, 128 * 77); + int check1 = euclid_mod(64 * 67, 64 * 81); + int check2 = euclid_sub(128 * 12, 128 * 77); - printf("%d\n", check1); - printf("%d\n", check2); + printf("[#]\nModulus-based euclidean algorithm result:\n%d\n", check1); + printf("[#]\nSubtraction-based euclidean algorithm result:\n%d\n", check2); - return 0; + return 0; } diff --git a/contents/euclidean_algorithm/code/clisp/euclidean.lisp b/contents/euclidean_algorithm/code/clisp/euclidean.lisp index 62f525ac3..19cba7358 100644 --- 
a/contents/euclidean_algorithm/code/clisp/euclidean.lisp +++ b/contents/euclidean_algorithm/code/clisp/euclidean.lisp @@ -17,8 +17,10 @@ (abs a) (euclid-mod b (mod a b)))) -(print (euclid-sub (* 64 67) (* 64 81))) -(print (euclid-mod (* 128 12) (* 128 77))) +(format T "[#]~%Modulus-based euclidean algorithm result:~%") +(format T "~d~%" (euclid-sub (* 64 67) (* 64 81))) +(format T "[#]~%Subtraction-based euclidean algorithm result:~%") +(format T "~d~%" (euclid-mod (* 128 12) (* 128 77))) ;; Quick test (assert diff --git a/contents/euclidean_algorithm/code/coconut/euclidean.coco b/contents/euclidean_algorithm/code/coconut/euclidean.coco new file mode 100644 index 000000000..17c592cf0 --- /dev/null +++ b/contents/euclidean_algorithm/code/coconut/euclidean.coco @@ -0,0 +1,21 @@ +def euclid_sub(int(a), 0) = a +addpattern def euclid_sub(0, int(b)) = b + +addpattern def euclid_sub(int(a), int(b)): + if a < b: + return euclid_sub(a, b - a) + elif b < a: + return euclid_sub(a - b, b) + return a + + +def euclid_mod(int(a), 0) = a +addpattern def euclid_mod(0, int(b)) = b + +addpattern def euclid_mod(int(a), int(b)) = euclid_mod(b, a % b) + +if __name__ == '__main__': + print('[#]\nModulus-based euclidean algorithm result:') + print(euclid_mod(64 * 67, 64 * 81)) + print('[#]\nSubtraction-based euclidean algorithm result:') + print(euclid_sub(128 * 12, 128 * 77)) diff --git a/contents/euclidean_algorithm/code/c++/euclidean.cpp b/contents/euclidean_algorithm/code/cpp/euclidean.cpp similarity index 76% rename from contents/euclidean_algorithm/code/c++/euclidean.cpp rename to contents/euclidean_algorithm/code/cpp/euclidean.cpp index f7b818802..1f6c04e27 100644 --- a/contents/euclidean_algorithm/code/c++/euclidean.cpp +++ b/contents/euclidean_algorithm/code/cpp/euclidean.cpp @@ -34,6 +34,6 @@ int main() { auto check1 = euclid_mod(64 * 67, 64 * 81); auto check2 = euclid_sub(128 * 12, 128 * 77); - std::cout << check1 << '\n'; - std::cout << check2 << '\n'; + std::cout << 
"[#]\nModulus-based euclidean algorithm result:\n" << check1 << '\n'; + std::cout << "[#]\nSubtraction-based euclidean algorithm result:\n" << check2 << '\n'; } diff --git a/contents/euclidean_algorithm/code/csharp/Program.cs b/contents/euclidean_algorithm/code/csharp/Program.cs index edf1edfd4..80857df27 100644 --- a/contents/euclidean_algorithm/code/csharp/Program.cs +++ b/contents/euclidean_algorithm/code/csharp/Program.cs @@ -7,12 +7,13 @@ class Program { static void Main(string[] args) { - Console.WriteLine("EuclideanAlgorithm"); var euclideanAlgorithm = new EuclideanAlgorithm(); int check = euclideanAlgorithm.EuclidMod(64 * 67, 64 * 81); int check2 = euclideanAlgorithm.EuclidSub(128 * 12, 128 * 77); + Console.WriteLine("[#]\nModulus-based euclidean algorithm result:"); Console.WriteLine(check); + Console.WriteLine("[#]\nSubtraction-based euclidean algorithm result:"); Console.WriteLine(check2); } } diff --git a/contents/euclidean_algorithm/code/d/euclidean_algorithm.d b/contents/euclidean_algorithm/code/d/euclidean_algorithm.d index 042a9bae1..585d0aa1b 100644 --- a/contents/euclidean_algorithm/code/d/euclidean_algorithm.d +++ b/contents/euclidean_algorithm/code/d/euclidean_algorithm.d @@ -37,6 +37,6 @@ void main() auto check1 = euclid_mod(64 * 67, 64 * 81); auto check2 = euclid_sub(128 * 12, 128 * 77); - writeln("Modulus-based euclidean algorithm result: ", check1); - writeln("Subtraction-based euclidean algorithm result: ", check2); + writeln("[#]\nModulus-based euclidean algorithm result:\n", check1); + writeln("[#]\nSubtraction-based euclidean algorithm result:\n", check2); } diff --git a/contents/euclidean_algorithm/code/expected.json b/contents/euclidean_algorithm/code/expected.json new file mode 100644 index 000000000..f6b6ffb70 --- /dev/null +++ b/contents/euclidean_algorithm/code/expected.json @@ -0,0 +1,8 @@ +{ + "Description": "euclidean algorithm", + "Delta" : 0.0, + "OutputValues" : [ + "64", + "128" + ] +} diff --git 
a/contents/euclidean_algorithm/code/fortran/euclidean.f90 b/contents/euclidean_algorithm/code/fortran/euclidean.f90 index 3107e4de2..e0dc9610e 100644 --- a/contents/euclidean_algorithm/code/fortran/euclidean.f90 +++ b/contents/euclidean_algorithm/code/fortran/euclidean.f90 @@ -38,12 +38,18 @@ PROGRAM euclidean IMPLICIT NONE INTEGER :: a, b, euclid_sub, euclid_mod - a = 24 - b = 27 - WRITE(*,*) 'Subtraction method: GCD is: ', euclid_sub(a, b) + a = 64 * 67 + b = 64 * 81 - a = 24 - b = 27 - WRITE(*,*) 'Modulus method: GCD is: ', euclid_mod(a, b) + WRITE(*,'(a)') '[#]' + WRITE(*,'(a)') 'Modulus-based euclidean algorithm result:' + WRITE(*, '(g0)') euclid_mod(a, b) + + a = 128 * 12 + b = 128 * 77 + + WRITE(*,'(a)') '[#]' + WRITE(*,'(a)') 'Subtraction-based euclidean algorithm result:' + WRITE(*, '(g0)') euclid_sub(a, b) END PROGRAM euclidean diff --git a/contents/euclidean_algorithm/code/go/euclidean.go b/contents/euclidean_algorithm/code/go/euclidean.go index f457b1849..ea543fe75 100644 --- a/contents/euclidean_algorithm/code/go/euclidean.go +++ b/contents/euclidean_algorithm/code/go/euclidean.go @@ -41,6 +41,8 @@ func main() { check1 := euclidMod(64*67, 64*81) check2 := euclidSub(128*12, 128*77) + fmt.Println("[#]\nModulus-based euclidean algorithm result:") fmt.Println(check1) + fmt.Println("[#]\nSubtraction-based euclidean algorithm result:") fmt.Println(check2) } diff --git a/contents/euclidean_algorithm/code/haskell/euclidean_algorithm.hs b/contents/euclidean_algorithm/code/haskell/euclidean_algorithm.hs new file mode 100644 index 000000000..917aef7df --- /dev/null +++ b/contents/euclidean_algorithm/code/haskell/euclidean_algorithm.hs @@ -0,0 +1,37 @@ +-- Method 1: Euclid's original subtraction algorithm + +euclidSub :: Integer -> Integer -> Integer +euclidSub a b = inner (abs a) (abs b) + where + inner x y + -- if a = b, then the gcd is a + | x == y = x + -- if a < b: Recursively call euclidSub with the a and (b-a) as new inputs + | x < y = euclidSub x (y - x) + 
-- otherwise: Recursively call euclidSub with the a and (b-a) as new inputs + | otherwise = euclidSub (x - y) y + +-- _______________________________________________________________________ + +-- Method 2: Modern implemetation - The modulus method. + +euclidMod :: Integer -> Integer -> Integer +euclidMod a b = inner (abs a) (abs b) + where + -- if a divides b, then gcd is a + inner x 0 = x + -- otherwise, recursively call inner with b and (a mod b) as new inputs + inner x y = inner y (x `mod` y) + +-- _________________________________________________________________________ + +-- Examples + +main :: IO () +main = do + let chk1 = euclidMod (64 * 67) (64 * 81) + chk2 = euclidSub (128 * 12) (128 * 77) + putStrLn "[#]\nModulus-based euclidean algorithm result:" + print chk1 + putStrLn "[#]\nSubtraction-based euclidean algorithm result:" + print chk2 diff --git a/contents/euclidean_algorithm/code/haskell/euclidean_example.hs b/contents/euclidean_algorithm/code/haskell/euclidean_example.hs deleted file mode 100644 index e09b450c9..000000000 --- a/contents/euclidean_algorithm/code/haskell/euclidean_example.hs +++ /dev/null @@ -1,21 +0,0 @@ --- contributed by Nicole Mazzuca (ubsan) -euclidSub :: Integer -> Integer -> Integer -euclidSub a b = inner (abs a) (abs b) - where - inner x y - | x == y = x - | x < y = euclidSub x (y - x) - | otherwise = euclidSub (x - y) y - -euclidMod :: Integer -> Integer -> Integer -euclidMod a b = inner (abs a) (abs b) - where - inner x 0 = x - inner x y = inner y (x `mod` y) - -main :: IO () -main = do - let chk1 = euclidMod (64 * 67) (64 * 81) - chk2 = euclidSub (128 * 12) (128 * 77) - print chk1 - print chk2 diff --git a/contents/euclidean_algorithm/code/java/EuclideanAlgo.java b/contents/euclidean_algorithm/code/java/EuclideanAlgo.java index 95d233f58..ac53379da 100644 --- a/contents/euclidean_algorithm/code/java/EuclideanAlgo.java +++ b/contents/euclidean_algorithm/code/java/EuclideanAlgo.java @@ -26,7 +26,9 @@ public static int 
euclidMod(int a, int b) { } public static void main(String[] args) { + System.out.println("[#]\nModulus-based euclidean algorithm result:"); System.out.println(euclidMod(64 * 67, 64 * 81)); + System.out.println("[#]\nSubtraction-based euclidean algorithm result:"); System.out.println(euclidSub(128 * 12, 128 * 77)); } } diff --git a/contents/euclidean_algorithm/code/javascript/euclidean_example.js b/contents/euclidean_algorithm/code/javascript/euclidean_example.js index fbaf4bfcc..2199c37dc 100644 --- a/contents/euclidean_algorithm/code/javascript/euclidean_example.js +++ b/contents/euclidean_algorithm/code/javascript/euclidean_example.js @@ -18,14 +18,16 @@ function euclidSub(a, b) { while (a !== b) { if (a > b) { - a -= a - b; + a -= b; } else { - b = b - a; + b -= a; } } return a; } +console.log('[#]\nModulus-based euclidean algorithm result:') console.log(euclidMod(64 * 67, 64 * 81)); +console.log('[#]\nSubtraction-based euclidean algorithm result:') console.log(euclidSub(128 * 12, 128 * 77)); diff --git a/contents/euclidean_algorithm/code/julia/euclidean.jl b/contents/euclidean_algorithm/code/julia/euclidean.jl index 744ae2187..a85f931ae 100644 --- a/contents/euclidean_algorithm/code/julia/euclidean.jl +++ b/contents/euclidean_algorithm/code/julia/euclidean.jl @@ -28,8 +28,8 @@ function main() check1 = euclid_mod(64 * 67, 64 * 81); check2 = euclid_sub(128 * 12, 128 * 77); - println("Modulus-based euclidean algorithm result: $(check1)") - println("subtraction-based euclidean algorithm result: $(check2)") + println("[#]\nModulus-based euclidean algorithm result:\n$(check1)") + println("[#]\nSubtraction-based euclidean algorithm result:\n$(check2)") end diff --git a/contents/euclidean_algorithm/code/kotlin/Euclidean.kt b/contents/euclidean_algorithm/code/kotlin/Euclidean.kt index 9e14c7463..855afcd59 100644 --- a/contents/euclidean_algorithm/code/kotlin/Euclidean.kt +++ b/contents/euclidean_algorithm/code/kotlin/Euclidean.kt @@ -26,6 +26,8 @@ fun euclidMod(a: Int, 
b: Int): Int { } fun main(args: Array) { - println(euclidSub(128 * 12, 128 * 77)) + println("[#]\nModulus-based euclidean algorithm result:") println(euclidMod(64 * 67, 64 * 81)) -} + println("[#]\nSubtraction-based euclidean algorithm result:") + println(euclidSub(128 * 12, 128 * 77)) +} \ No newline at end of file diff --git a/contents/euclidean_algorithm/code/lolcode/euclid.lol b/contents/euclidean_algorithm/code/lolcode/euclid.lol index 418851107..28e3230e3 100644 --- a/contents/euclidean_algorithm/code/lolcode/euclid.lol +++ b/contents/euclidean_algorithm/code/lolcode/euclid.lol @@ -24,7 +24,7 @@ HAI 1.2 HOW IZ I UKLIDSUP YR NUM1 AN YR NUM2 NUM1 R I IZ ABZ YR NUM1 MKAY - NUM2 R EI IZ ABZ YR NUM2 MKAY + NUM2 R I IZ ABZ YR NUM2 MKAY IM IN YR LOOP BOTH SAEM NUM1 AN NUM2, O RLY? @@ -44,4 +44,4 @@ HAI 1.2 VISIBLE CHECK1 VISIBLE CHECK2 -KTHXBYE \ No newline at end of file +KTHXBYE diff --git a/contents/euclidean_algorithm/code/lua/euclidean.lua b/contents/euclidean_algorithm/code/lua/euclidean.lua index 45ef1fd18..0149ffd34 100644 --- a/contents/euclidean_algorithm/code/lua/euclidean.lua +++ b/contents/euclidean_algorithm/code/lua/euclidean.lua @@ -1,6 +1,6 @@ -function euclidSub (a, b) - local a = math.abs(a) - local b = math.abs(b) +local function euclid_sub(a, b) + a = math.abs(a) + b = math.abs(b) while a ~= b do if a > b then @@ -13,9 +13,9 @@ function euclidSub (a, b) return a end -function euclidMod (a, b) - local a = math.abs(a) - local b = math.abs(b) +local function euclid_mod(a, b) + a = math.abs(a) + b = math.abs(b) while b ~= 0 do a, b = b, a%b @@ -24,9 +24,11 @@ function euclidMod (a, b) return a end -function main () - print(euclidSub(128 * 12, 128 * 77)) - print(euclidMod(64 * 67, 64 * 81)) +local function main() + print("[#]\nModulus-based euclidean algorithm result:") + print(euclid_mod(64 * 67, 64 * 81)) + print("[#]\nSubtraction-based euclidean algorithm result:") + print(euclid_sub(128 * 12, 128 * 77)) end main() diff --git 
a/contents/euclidean_algorithm/code/matlab/euclidean.m b/contents/euclidean_algorithm/code/matlab/euclidean.m index de2a63dec..7a9b317f3 100644 --- a/contents/euclidean_algorithm/code/matlab/euclidean.m +++ b/contents/euclidean_algorithm/code/matlab/euclidean.m @@ -31,6 +31,7 @@ end function euclid() - ['gcd(520,420) via euclidSub: ',num2str(euclidSub(520,420))] - ['gcd(183,244) via euclidMod: ',num2str(euclidMod(183,244))] + ['[#] Modulus-based euclidean algorithm result: ',num2str(euclidMod(64 * 67, 64 * 81))] + + ['[#] Subtraction-based euclidean algorithm result: ',num2str(euclidSub(128 * 12, 128 * 77))] end \ No newline at end of file diff --git a/contents/euclidean_algorithm/code/nim/euclid_algorithm.nim b/contents/euclidean_algorithm/code/nim/euclid_algorithm.nim index e8da73d1a..d52b8062a 100644 --- a/contents/euclidean_algorithm/code/nim/euclid_algorithm.nim +++ b/contents/euclidean_algorithm/code/nim/euclid_algorithm.nim @@ -1,16 +1,16 @@ -proc euclid_mod(in1, in2: int): int = +func euclid_mod(in1, in2: int): int = var a = abs(in1) b = abs(in2) - + while b != 0: let temp: int = b b = a mod b a = temp; - return a + result = a -proc euclid_sub(in1, in2: int): int = +func euclid_sub(in1, in2: int): int = var a = abs(in1) b = abs(in2) @@ -20,8 +20,11 @@ proc euclid_sub(in1, in2: int): int = a -= b else: b -= a - - return a -echo euclid_sub(64 * 67, 64 * 81) -echo euclid_mod(128 * 12, 128 * 77) \ No newline at end of file + result = a + +when isMainModule: + echo "[#]\nModulus-based euclidean algorithm result:" + echo euclid_sub(64 * 67, 64 * 81) + echo "[#]\nSubtraction-based euclidean algorithm result:" + echo euclid_mod(128 * 12, 128 * 77) diff --git a/contents/euclidean_algorithm/code/ocaml/euclidean_example.ml b/contents/euclidean_algorithm/code/ocaml/euclidean_example.ml index 27e3ab166..c363e5e4f 100644 --- a/contents/euclidean_algorithm/code/ocaml/euclidean_example.ml +++ b/contents/euclidean_algorithm/code/ocaml/euclidean_example.ml @@ -19,6 +19,7 @@ 
let euclid_sub a b = let chk1 = euclid_mod (64 * 67) (64 * 81) let chk2 = euclid_sub (128 * 12) (128 * 77) let () = + Printf.printf "[#]\nModulus-based euclidean algorithm result:\n"; chk1 |> print_int |> print_newline; - chk2 |> print_int |> print_newline - + Printf.printf "[#]\nSubtraction-based euclidean algorithm result:\n"; + chk2 |> print_int |> print_newline \ No newline at end of file diff --git a/contents/euclidean_algorithm/code/php/euclidean.php b/contents/euclidean_algorithm/code/php/euclidean.php index 52aac08c9..cd13e9d74 100644 --- a/contents/euclidean_algorithm/code/php/euclidean.php +++ b/contents/euclidean_algorithm/code/php/euclidean.php @@ -29,7 +29,7 @@ function euclid_mod(int $a, int $b): int return $a; } -printf('Euclidean mod: %s', euclid_mod(64 * 67, 64 * 81)); +printf('[#]'.PHP_EOL.'Modulus-based euclidean algorithm result:'.PHP_EOL.'%s', euclid_mod(64 * 67, 64 * 81)); echo PHP_EOL; -printf('Euclidean sub: %s', euclid_sub(128 * 12, 128 * 77)); +printf('[#]'.PHP_EOL.'Subtraction-based euclidean algorithm result:'.PHP_EOL.'%s', euclid_sub(128 * 12, 128 * 77)); echo PHP_EOL; diff --git a/contents/euclidean_algorithm/code/powershell/euclidean_algorithm.ps1 b/contents/euclidean_algorithm/code/powershell/euclidean_algorithm.ps1 new file mode 100644 index 000000000..3e3925ed7 --- /dev/null +++ b/contents/euclidean_algorithm/code/powershell/euclidean_algorithm.ps1 @@ -0,0 +1,30 @@ +function Sub-Euclid($a, $b) { + $a = [Math]::Abs($a) + $b = [Math]::Abs($b) + + while ($a -ne $b) { + if ($a -gt $b) { + $a = $a - $b + } else { + $b = $b - $a + } + } + + return $a +} + +function Mod-Euclid($a, $b) { + $a = [Math]::Abs($a) + $b = [Math]::Abs($b) + + while ($b -ne 0) { + $tmp = $b + $b = $a % $b + $a = $tmp + } + + return $a +} + +Write-Host "[#]`nSubtraction-based euclidean algorithm result:`n$(Mod-Euclid $(64 * 67) $(64 * 81))" +Write-Host "[#]`nModulus-based euclidean algorithm result:`n$(Sub-Euclid $(128 * 12) $(128 * 77))" diff --git 
a/contents/euclidean_algorithm/code/python/euclidean_example.py b/contents/euclidean_algorithm/code/python/euclidean_example.py index 367aed512..03d51aa4b 100644 --- a/contents/euclidean_algorithm/code/python/euclidean_example.py +++ b/contents/euclidean_algorithm/code/python/euclidean_example.py @@ -13,6 +13,11 @@ def euclid_sub(a, b): a = abs(a) b = abs(b) + if a == 0: + return b + elif b == 0: + return a + while a != b: if a > b: a -= b @@ -22,5 +27,7 @@ def euclid_sub(a, b): return a if __name__=="__main__": - print('Euclidean mod: ', euclid_mod(64 * 67, 64 * 81)) - print('Euclidean sub: ', euclid_sub(128 * 12, 128 * 77)) + print('[#]\nModulus-based euclidean algorithm result:'), + print(euclid_mod(64 * 67, 64 * 81)) + print('[#]\nSubtraction-based euclidean algorithm result:') + print(euclid_sub(128 * 12, 128 * 77)) diff --git a/contents/euclidean_algorithm/code/racket/euclidean_algorithm.rkt b/contents/euclidean_algorithm/code/racket/euclidean_algorithm.rkt index f170d8e17..8d19eab86 100755 --- a/contents/euclidean_algorithm/code/racket/euclidean_algorithm.rkt +++ b/contents/euclidean_algorithm/code/racket/euclidean_algorithm.rkt @@ -23,5 +23,7 @@ ) ) +(displayln "[#]\nModulus-based euclidean algorithm result:") (displayln (euclid_sub (* 64 67) (* 64 81))) +(displayln "[#]\nSubtraction-based euclidean algorithm result:") (displayln (euclid_mod (* 128 12) (* 128 77))) diff --git a/contents/euclidean_algorithm/code/ruby/euclidean.rb b/contents/euclidean_algorithm/code/ruby/euclidean.rb index b8667ad71..b55bbd728 100644 --- a/contents/euclidean_algorithm/code/ruby/euclidean.rb +++ b/contents/euclidean_algorithm/code/ruby/euclidean.rb @@ -17,9 +17,8 @@ def gcd_minus(a, b) end a end - -p gcd_mod(12 * 6, 12 * 4) #=> 12 -p gcd_mod(9 * 667, 9 * 104) #=> 9 -p gcd_minus(12 * 6, 12 * 4) #=> 12 -p gcd_minus(9 * 667, 9 * 104) #=> 9 +print "[#]\nModulus-based euclidean algorithm result:\n" +p gcd_mod(64 * 67, 64 * 81) +print "[#]\nSubtraction-based euclidean algorithm 
result:\n" +p gcd_minus(128 * 12, 128 * 77) diff --git a/contents/euclidean_algorithm/code/rust/euclidean_example.rs b/contents/euclidean_algorithm/code/rust/euclidean_example.rs index 89b55ba22..1c9fb55f7 100644 --- a/contents/euclidean_algorithm/code/rust/euclidean_example.rs +++ b/contents/euclidean_algorithm/code/rust/euclidean_example.rs @@ -29,6 +29,6 @@ fn euclid_rem(mut a: i64, mut b: i64) -> i64 { fn main() { let chk1 = euclid_rem(64 * 67, 64 * 81); let chk2 = euclid_sub(128 * 12, 128 * 77); - println!("{}", chk1); - println!("{}", chk2); + println!("[#]\nModulus-based euclidean algorithm result:\n{}", chk1); + println!("[#]\nSubtraction-based euclidean algorithm result:\n{}", chk2); } diff --git a/contents/euclidean_algorithm/code/scala/euclidean.scala b/contents/euclidean_algorithm/code/scala/euclidean.scala index bc3fe103a..25079e603 100644 --- a/contents/euclidean_algorithm/code/scala/euclidean.scala +++ b/contents/euclidean_algorithm/code/scala/euclidean.scala @@ -3,8 +3,8 @@ object Euclid { def euclid_sub(a: Int, b: Int): Int = (Math.abs(a), Math.abs(b)) match { case (0, _) | (_, 0) => 0 - case (x, y) if x < y => euclid(x, y - x) - case (x, y) if x > y => euclid(x - y, y) + case (x, y) if x < y => euclid_sub(x, y - x) + case (x, y) if x > y => euclid_sub(x - y, y) case _ => a } @@ -15,8 +15,10 @@ object Euclid { } def main(args: Array[String]): Unit = { - println(euclid_sub(151 * 899, 151 * 182)) - println(euclid_mod(151 * 899, 151 * 182)) + println("[#]\nModulus-based euclidean algorithm result:") + println(euclid_mod(64 * 67, 64 * 81)) + println("[#]\nSubtraction-based euclidean algorithm result:") + println(euclid_sub(128 * 12, 128 * 77)) } } diff --git a/contents/euclidean_algorithm/code/scheme/euclidalg.ss b/contents/euclidean_algorithm/code/scheme/euclidalg.ss index 3d891ba73..2cda992d8 100644 --- a/contents/euclidean_algorithm/code/scheme/euclidalg.ss +++ b/contents/euclidean_algorithm/code/scheme/euclidalg.ss @@ -11,5 +11,8 @@ a (euclid-mod b 
(modulo a b)))) +(display "[#]\nModulus-based euclidean algorithm result:") (newline) (display (euclid-mod (* 64 67) (* 64 81))) (newline) -(display (euclid-sub (* 64 12) (* 64 27))) (newline) + +(display "[#]\nSubtraction-based euclidean algorithm result:") (newline) +(display (euclid-sub (* 128 12) (* 128 77))) (newline) diff --git a/contents/euclidean_algorithm/code/swift/euclidean_algorithm.swift b/contents/euclidean_algorithm/code/swift/euclidean_algorithm.swift index 7b43959ad..9c2c71448 100644 --- a/contents/euclidean_algorithm/code/swift/euclidean_algorithm.swift +++ b/contents/euclidean_algorithm/code/swift/euclidean_algorithm.swift @@ -27,7 +27,9 @@ func euclidMod(a: Int, b: Int) -> Int { } func main() { + print("[#]\nModulus-based euclidean algorithm result:") print(euclidMod(a: 64 * 67, b: 64 * 81)) + print("[#]\nSubtraction-based euclidean algorithm result:") print(euclidSub(a: 128 * 12, b: 128 * 77)) } diff --git a/contents/euclidean_algorithm/code/viml/euclidean.vim b/contents/euclidean_algorithm/code/viml/euclidean.vim new file mode 100644 index 000000000..c7b939f20 --- /dev/null +++ b/contents/euclidean_algorithm/code/viml/euclidean.vim @@ -0,0 +1,33 @@ +function s:euclid_mod(a, b) + let l:a = abs(a:a) + let l:b = abs(a:b) + + while l:b != 0 + let l:c = l:b + let l:b = l:a % l:b + let l:a = l:c + endwhile + + return l:a +endfunction + +function s:euclid_sub(a, b) + let l:a = abs(a:a) + let l:b = abs(a:b) + + while l:a != l:b + if l:a > l:b + let l:a -= l:b + else + let l:b -= l:a + endif + endwhile + + return l:a +endfunction + +let s:check_1 = s:euclid_mod(64 * 67, 64 * 71) +let s:check_2 = s:euclid_sub(128 * 12, 128 * 77) + +echo 'Modulus-based euclidean algorithm result:' s:check_1 +echo 'subtraction-based euclidean algorithm result:' s:check_2 diff --git a/contents/euclidean_algorithm/euclidean_algorithm.md b/contents/euclidean_algorithm/euclidean_algorithm.md index d1ac7e2de..f75f6651f 100644 --- 
a/contents/euclidean_algorithm/euclidean_algorithm.md +++ b/contents/euclidean_algorithm/euclidean_algorithm.md @@ -5,6 +5,8 @@ Computer science is (almost by definition) a science about computers -- a device The algorithm is a simple way to find the *greatest common divisor* (GCD) of two numbers, which is useful for a number of different applications (like reducing fractions). The first method (envisioned by Euclid) uses simple subtraction: {% method %} +{% sample lang="vim" %} +[import:14-27, lang="vim"](code/viml/euclidean.vim) {% sample lang="c" %} [import:17-30, lang="c_cpp"](code/c/euclidean_example.c) {% sample lang="cs" %} @@ -12,7 +14,7 @@ The algorithm is a simple way to find the *greatest common divisor* (GCD) of two {% sample lang="clj" %} [import:2-8, lang="clojure"](code/clojure/euclidean_example.clj) {% sample lang="cpp" %} -[import:18-31, lang="c_cpp"](code/c++/euclidean.cpp) +[import:18-31, lang="c_cpp"](code/cpp/euclidean.cpp) {% sample lang="java" %} [import:3-16, lang="java"](code/java/EuclideanAlgo.java) {% sample lang="kotlin" %} @@ -22,9 +24,9 @@ The algorithm is a simple way to find the *greatest common divisor* (GCD) of two {% sample lang="lisp" %} [import:3-12, lang="lisp"](code/clisp/euclidean.lisp) {% sample lang="py" %} -[import:11-22, lang="python"](code/python/euclidean_example.py) -{% sample lang="haskell" %} -[import:2-8, lang="haskell"](code/haskell/euclidean_example.hs) +[import:11-27, lang="python"](code/python/euclidean_example.py) +{% sample lang="hs" %} +[import:3-13, lang="haskell"](code/haskell/euclidean_algorithm.hs) {% sample lang="rs" %} [import:3-15, lang="rust"](code/rust/euclidean_example.rs) {% sample lang="ml" %} @@ -33,7 +35,7 @@ The algorithm is a simple way to find the *greatest common divisor* (GCD) of two [import:25-38, lang="go"](code/go/euclidean.go) {% sample lang="swift" %} [import:1-14, lang="swift"](code/swift/euclidean_algorithm.swift) -{% sample lang="matlab" %} +{% sample lang="m" %} [import:3-17, 
lang="matlab"](code/matlab/euclidean.m) {% sample lang="lua" %} [import:1-14, lang="lua"](code/lua/euclidean.lua) @@ -54,7 +56,7 @@ The algorithm is a simple way to find the *greatest common divisor* (GCD) of two {% sample lang="scala" %} [import:3-8, lang="scala"](code/scala/euclidean.scala) {% sample lang="racket" %} -[import:3-14, lang="lisp"](code/racket/euclidean_algorithm.rkt) +[import:3-14, lang="racket"](code/racket/euclidean_algorithm.rkt) {% sample lang="ruby" %} [import:8-19, lang="ruby"](code/ruby/euclidean.rb) {% sample lang="st" %} @@ -73,20 +75,28 @@ The algorithm is a simple way to find the *greatest common divisor* (GCD) of two [import:1-7, lang="scheme"](code/scheme/euclidalg.ss) {% sample lang="scratch" %}

- +

+# leave one line empty: + +{% sample lang="ps1" %} +[import:1-14, lang="powershell"](code/powershell/euclidean_algorithm.ps1) +{% sample lang="coco" %} +[import:1-9, lang="coconut"](code/coconut/euclidean.coco) {% endmethod %} Here, we simply line the two numbers up every step and subtract the lower value from the higher one every timestep. Once the two values are equal, we call that value the greatest common divisor. A graph of `a` and `b` as they change every step would look something like this:

- +

Modern implementations, though, often use the modulus operator (%) like so {% method %} +{% sample lang="vim" %} +[import:1-12, lang="vim"](code/viml/euclidean.vim) {% sample lang="c" %} [import:4-16, lang="c_cpp"](code/c/euclidean_example.c) {% sample lang="cs" %} @@ -94,7 +104,7 @@ Modern implementations, though, often use the modulus operator (%) like so {% sample lang="clj" %} [import:9-13, lang="clojure"](code/clojure/euclidean_example.clj) {% sample lang="cpp" %} -[import:5-15, lang="c_cpp"](code/c++/euclidean.cpp) +[import:5-15, lang="c_cpp"](code/cpp/euclidean.cpp) {% sample lang="java" %} [import:18-26, lang="java"](code/java/EuclideanAlgo.java) {% sample lang="kotlin" %} @@ -105,8 +115,8 @@ Modern implementations, though, often use the modulus operator (%) like so [import:14-18, lang="lisp"](code/clisp/euclidean.lisp) {% sample lang="py" %} [import:1-9, lang="python"](code/python/euclidean_example.py) -{% sample lang="haskell" %} -[import:10-14, lang="haskell"](code/haskell/euclidean_example.hs) +{% sample lang="hs" %} +[import:18-25, lang="haskell"](code/haskell/euclidean_algorithm.hs) {% sample lang="rs" %} [import:17-27, lang="rust"](code/rust/euclidean_example.rs) {% sample lang="ml" %} @@ -115,7 +125,7 @@ Modern implementations, though, often use the modulus operator (%) like so [import:14-23, lang="go"](code/go/euclidean.go) {% sample lang="swift" %} [import:16-27, lang="swift"](code/swift/euclidean_algorithm.swift) -{% sample lang="matlab" %} +{% sample lang="m" %} [import:19-31, lang="matlab"](code/matlab/euclidean.m) {% sample lang="lua" %} [import:16-25, lang="lua"](code/lua/euclidean.lua) @@ -136,7 +146,7 @@ Modern implementations, though, often use the modulus operator (%) like so {% sample lang="scala" %} [import:10-14, lang="scala"](code/scala/euclidean.scala) {% sample lang="racket" %} -[import:16-24, lang="lisp"](code/racket/euclidean_algorithm.rkt) +[import:16-24, lang="racket"](code/racket/euclidean_algorithm.rkt) {% sample lang="ruby" 
%} [import:1-6, lang="ruby"](code/ruby/euclidean.rb) {% sample lang="st" %} @@ -155,15 +165,21 @@ Modern implementations, though, often use the modulus operator (%) like so [import:9-12, lang="scheme"](code/scheme/euclidalg.ss) {% sample lang="scratch" %}

- +

+# leave one line empty: + +{% sample lang="ps1" %} +[import:16-27, lang="powershell"](code/powershell/euclidean_algorithm.ps1) +{% sample lang="coco" %} +[import:12-15, lang="coconut"](code/coconut/euclidean.coco) {% endmethod %} Here, we set `b` to be the remainder of `a%b` and `a` to be whatever `b` was last timestep. Because of how the modulus operator works, this will provide the same information as the subtraction-based implementation, but when we show `a` and `b` as they change with time, we can see that it might take many fewer steps:

- +

The Euclidean Algorithm is truly fundamental to many other algorithms throughout the history of computer science and will definitely be used again later. At least to me, it's amazing how such an ancient algorithm can still have modern use and appeal. That said, there are still other algorithms out there that can find the greatest common divisor of two numbers that are arguably better in certain cases than the Euclidean algorithm, but the fact that we are discussing Euclid two millennia after his death shows how timeless and universal mathematics truly is. I think that's pretty cool. @@ -173,12 +189,14 @@ The Euclidean Algorithm is truly fundamental to many other algorithms throughout Here's a video on the Euclidean algorithm:
- +
## Example Code {% method %} +{% sample lang="vim" %} +[import, lang="vim"](code/viml/euclidean.vim) {% sample lang="c" %} [import, lang="c_cpp"](code/c/euclidean_example.c) {% sample lang="cs" %} @@ -189,17 +207,19 @@ Here's a video on the Euclidean algorithm: {% sample lang="clj" %} [import, lang="clojure"](code/clojure/euclidean_example.clj) {% sample lang="cpp" %} -[import, lang="c_cpp"](code/c++/euclidean.cpp) +[import, lang="c_cpp"](code/cpp/euclidean.cpp) {% sample lang="java" %} [import, lang="java"](code/java/EuclideanAlgo.java) +{% sample lang="kotlin" %} +[import, lang="kotlin"](code/kotlin/Euclidean.kt) {% sample lang="js" %} [import, lang="javascript"](code/javascript/euclidean_example.js) {% sample lang="lisp" %} [import, lang="lisp"](code/clisp/euclidean.lisp) {% sample lang="py" %} [import, lang="python"](code/python/euclidean_example.py) -{% sample lang="haskell" %} -[import, lang="haskell"](code/haskell/euclidean_example.hs) +{% sample lang="hs" %} +[import, lang="haskell"](code/haskell/euclidean_algorithm.hs) {% sample lang="rs" %} [import, lang="rust"](code/rust/euclidean_example.rs) {% sample lang="ml" %} @@ -208,7 +228,7 @@ Here's a video on the Euclidean algorithm: [import, lang="go"](code/go/euclidean.go) {% sample lang="swift" %} [import, lang="swift"](code/swift/euclidean_algorithm.swift) -{% sample lang="matlab" %} +{% sample lang="m" %} [import, lang="matlab"](code/matlab/euclidean.m) {% sample lang="lua" %} [import, lang="lua"](code/lua/euclidean.lua) @@ -232,7 +252,7 @@ and modulo method: {% sample lang="scala" %} [import, lang="scala"](code/scala/euclidean.scala) {% sample lang="racket" %} -[import, lang="lisp"](code/racket/euclidean_algorithm.rkt) +[import, lang="racket"](code/racket/euclidean_algorithm.rkt) {% sample lang="ruby" %} [import, lang="ruby"](code/ruby/euclidean.rb) {% sample lang="st" %} @@ -262,8 +282,14 @@ A text version of the program is provided for both versions. 
The code snippets were taken from this [Scratch project](https://scratch.mit.edu/projects/278727055/)

- +

+# leave one line empty: + +{% sample lang="ps1" %} +[import, lang="powershell"](code/powershell/euclidean_algorithm.ps1) +{% sample lang="coco" %} +[import, lang="coconut"](code/coconut/euclidean.coco) {% endmethod %} @@ -276,7 +302,7 @@ MathJax.Hub.Queue(["Typeset",MathJax.Hub]); ##### Code Examples -The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). ##### Text @@ -285,8 +311,8 @@ The text of this chapter was written by [James Schloss](https://github.com/leios [

](https://creativecommons.org/licenses/by-sa/4.0/) ##### Images/Graphics -- The image "[Euclidsub](res/subtraction.png)" was created by [James Schloss](https://github.com/leios) and is licenced under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). -- The image "[Euclidmod](res/modulus.png)" was created by [James Schloss](https://github.com/leios) and is licenced under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Euclidsub](res/subtraction.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Euclidmod](res/modulus.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). ##### Pull Requests diff --git a/contents/flood_fill/code/c/flood_fill.c b/contents/flood_fill/code/c/flood_fill.c new file mode 100644 index 000000000..03df6f1fb --- /dev/null +++ b/contents/flood_fill/code/c/flood_fill.c @@ -0,0 +1,253 @@ +#include +#include +#include + +struct canvas { + int max_x, max_y; + int *data; +}; + +struct point { + int x, y; +}; + +struct stack { + size_t top, capacity; + struct point *data; +}; + +struct queue { + size_t front, back, capacity; + struct point *data; +}; + +int inbounds(struct point p, struct canvas c) { + return (p.x < 0 || p.y < 0 || p.y >= c.max_y || p.x >= c.max_x) ? 
0 : 1; +} + +int find_neighbors(struct canvas c, struct point p, int old_val, + struct point *neighbors) { + int cnt = 0; + struct point points[4] = { + {p.x, p.y + 1}, + {p.x + 1, p.y}, + {p.x, p.y - 1}, + {p.x - 1, p.y} + }; + + for (int i = 0; i < 4; ++i) { + if (inbounds(points[i], c) && + c.data[points[i].x + c.max_x * points[i].y] == old_val) { + neighbors[cnt++] = points[i]; + } + } + + return cnt; +} + +struct stack get_stack() { + struct stack stk; + + stk.data = malloc(4 * sizeof(struct point)); + stk.capacity = 4; + stk.top = 0; + + return stk; +} + +int stack_empty(struct stack stk) { + return stk.top == 0; +} + +void stack_push(struct stack *stk, struct point element) { + if (stk->top == stk->capacity) { + stk->capacity *= 2; + stk->data = realloc(stk->data, stk->capacity * sizeof(stk->data[0])); + } + + stk->data[stk->top++] = element; +} + +struct point stack_pop(struct stack *stk) { + return stk->data[--stk->top]; +} + +void free_stack(struct stack stk) { + free(stk.data); +} + +void stack_fill(struct canvas c, struct point p, int old_val, int new_val) { + if (old_val == new_val) { + return; + } + + struct stack stk = get_stack(); + stack_push(&stk, p); + + while (!stack_empty(stk)) { + struct point cur_loc = stack_pop(&stk); + if (c.data[cur_loc.x + c.max_x * cur_loc.y] == old_val) { + c.data[cur_loc.x + c.max_x * cur_loc.y] = new_val; + + struct point neighbors[4]; + int cnt = find_neighbors(c, cur_loc, old_val, neighbors); + + for (int i = 0; i < cnt; ++i) { + stack_push(&stk, neighbors[i]); + } + } + } + + free_stack(stk); +} + +struct queue get_queue() { + struct queue q; + + q.data = calloc(4, sizeof(struct point)); + q.front = 0; + q.back = 0; + q.capacity = 4; + + return q; +} + +int queue_empty(struct queue q) { + return q.front == q.back; +} + +void enqueue(struct queue *q, struct point element) { + if (q->front == (q->back + 1) % q->capacity) { + size_t size = sizeof(q->data[0]); + struct point *tmp = calloc((q->capacity * 2), size); + 
memcpy(tmp, q->data + q->front, (q->capacity - q->front) * size); + memcpy(tmp + q->capacity - q->front, q->data, (q->front - 1) * size); + + free(q->data); + + q->data = tmp; + q->back = q->capacity - 1; + q->front = 0; + q->capacity *= 2; + } + + q->data[q->back] = element; + q->back = (q->back + 1) % q->capacity; +} + +struct point dequeue(struct queue *q) { + struct point ret = q->data[q->front]; + q->front = (q->front + 1) % q->capacity; + + return ret; +} + +void free_queue(struct queue q) { + free(q.data); +} + +void queue_fill(struct canvas c, struct point p, int old_val, int new_val) { + if (old_val == new_val) { + return; + } + + struct queue q = get_queue(sizeof(struct point *)); + enqueue(&q, p); + + while (!queue_empty(q)) { + struct point cur_loc = dequeue(&q); + if (c.data[cur_loc.x + c.max_x * cur_loc.y] == old_val) { + c.data[cur_loc.x + c.max_x * cur_loc.y] = new_val; + + struct point neighbors[4]; + int cnt = find_neighbors(c, cur_loc, old_val, neighbors); + + for (int i = 0; i < cnt; ++i) { + enqueue(&q, neighbors[i]); + } + } + } + + free_queue(q); +} + +void recursive_fill(struct canvas c, struct point p, int old_val, + int new_val) { + + if (old_val == new_val) { + return; + } + + c.data[p.x + c.max_x * p.y] = new_val; + + struct point neighbors[4]; + int cnt = find_neighbors(c, p, old_val, neighbors); + + for (int i = 0; i < cnt; ++i) { + recursive_fill(c, neighbors[i], old_val, new_val); + } +} + +int grid_cmp(int *a, int *b, int size) { + for (int i = 0; i < size; ++i) { + if (a[i] != b[i]) { + return 0; + } + } + + return 1; +} + +int main() { + int grid[25] = { + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0 + }; + int grid1[25] = { + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0 + }; + int grid2[25] = { + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0 + }; + int answer_grid[25] = { + 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 
+ 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0 + }; + + struct canvas c = {5, 5, grid}; + struct canvas c1 = {5, 5, grid1}; + struct canvas c2 = {5, 5, grid2}; + + struct point start_loc = {0, 0}; + + int pass_cnt = 0; + + recursive_fill(c, start_loc, 0, 1); + pass_cnt += grid_cmp(grid, answer_grid, 25); + + stack_fill(c1, start_loc, 0, 1); + pass_cnt += grid_cmp(grid1, answer_grid, 25); + + queue_fill(c2, start_loc, 0, 1); + pass_cnt += grid_cmp(grid2, answer_grid, 25); + + printf("Test Summary: | Pass\tTotal\n"); + printf("Fill Methods |\t%d\t3\n", pass_cnt); + + return 0; +} + diff --git a/contents/flood_fill/code/coconut/flood_fill.coco b/contents/flood_fill/code/coconut/flood_fill.coco new file mode 100644 index 000000000..861d4d04b --- /dev/null +++ b/contents/flood_fill/code/coconut/flood_fill.coco @@ -0,0 +1,113 @@ +from collections import deque +import numpy as np + + +data Point(x, y): + def __add__(self, Point(other)) = Point(self.x + other.x, self.y + other.y) + + +# This function is necessary, because negative indices wrap around the +# array in Coconut. 
+def inbounds(canvas_shape, Point(location)) = + min(location) >= 0 and location.x < canvas_shape[0] and location.y < canvas_shape[1] + + +def find_neighbours(canvas, Point(location), old_value): + possible_neighbours = ((Point(0, 1), Point(1, 0), Point(0, -1), Point(-1, 0)) + |> map$(location.__add__)) + + yield from possible_neighbours |> filter$(x -> (inbounds(canvas.shape, x) + and canvas[x] == old_value)) + + +def stack_fill(canvas, Point(location), old_value, new_value): + if new_value == old_value or not inbounds(canvas.shape, location): + return + + stack = [location] + + while stack: + current_location = stack.pop() + if canvas[current_location] == old_value: + canvas[current_location] = new_value + stack.extend(find_neighbours(canvas, current_location, old_value)) + + +def queue_fill(canvas, Point(location), old_value, new_value): + if new_value == old_value or not inbounds(canvas.shape, location): + return + + queue = deque() + queue.append(location) + + canvas[location] = new_value + + while queue: + current_location = queue.popleft() + for neighbour in find_neighbours(canvas, current_location, old_value): + canvas[neighbour] = new_value + queue.append(neighbour) + + +def recursive_fill(canvas, Point(location), old_value, new_value): + if new_value == old_value or not inbounds(canvas.shape, location): + return + + canvas[location] = new_value + # consume is important here, because otherwise, the recursive function is not called again + consume( + find_neighbours(canvas, location, old_value) + |> map$(recursive_fill$(canvas, ?, old_value, new_value)) + ) + + +def test_grid(initial_canvas, final_canvas, function): + canvas = initial_canvas.copy() # ensure the initial_canvas is unchanged + function(canvas) + return (canvas == final_canvas).all() + +def test(): + from collections import namedtuple + + TestResults = namedtuple('TestResults', 'passes failures') + pass_count = failure_count = 0 + + grid = np.zeros((5, 5)) + grid[2,:] = 1 + solution_grid = 
np.zeros((5, 5)) + solution_grid[:3,] = 1 + + starting_location = Point(0, 0) + + recursive_test_func = recursive_fill$(?, starting_location, 0, 1) + # The following is manual unit testing of the function + if test_grid(grid, solution_grid, recursive_test_func): + pass_count += 1 + print('.', end='') + else: + failure_count += 1 + print('F', end='') + + stack_test_func = stack_fill$(?, starting_location, 0, 1) + if test_grid(grid, solution_grid, stack_test_func): + print('.', end='') + pass_count += 1 + else: + print('F', end='') + failure_count += 1 + + queue_test_func = queue_fill$(?, starting_location, 0, 1) + if test_grid(grid, solution_grid, queue_test_func): + print('.', end='') + pass_count += 1 + else: + print('F', end='') + failure_count += 1 + + print() + print(TestResults(pass_count, failure_count)) + +if __name__ == '__main__': + # Testing setup + test() + diff --git a/contents/flood_fill/code/cpp/flood_fill.cpp b/contents/flood_fill/code/cpp/flood_fill.cpp new file mode 100644 index 000000000..918566809 --- /dev/null +++ b/contents/flood_fill/code/cpp/flood_fill.cpp @@ -0,0 +1,156 @@ +#include +#include +#include +#include +#include +#include + +using CartesianIndex = std::array; + +auto inbounds(CartesianIndex size, CartesianIndex loc) { + if (loc[0] < 0 || loc[1] < 0) { + return false; + } else if (loc[0] >= size[0] || loc[1] >= size[1]) { + return false; + } + return true; +} + +auto find_neighbors( + std::vector> const& grid, + CartesianIndex loc, + float old_value, + float /* new_value */) { + + const std::vector possible_neighbors{ + {loc[0], loc[1] + 1}, + {loc[0] + 1, loc[1]}, + {loc[0], loc[1] - 1}, + {loc[0] - 1, loc[1]}}; + + std::vector neighbors; + + for (auto const& possible_neighbor : possible_neighbors) { + const auto size = CartesianIndex{ + static_cast(grid[0].size()), static_cast(grid.size())}; + const auto x = static_cast(possible_neighbor[0]); + const auto y = static_cast(possible_neighbor[1]); + if (inbounds(size, 
possible_neighbor) && grid[x][y] == old_value) { + neighbors.push_back(possible_neighbor); + } + } + + return neighbors; +} + +void recursive_fill( + std::vector>& grid, + CartesianIndex loc, + float old_value, + float new_value) { + if (old_value == new_value) { + return; + } + + const auto x = static_cast(loc[0]); + const auto y = static_cast(loc[1]); + + grid[x][y] = new_value; + + const auto possible_neighbors = find_neighbors(grid, loc, old_value, new_value); + for (auto const& possible_neighbor : possible_neighbors) { + recursive_fill(grid, possible_neighbor, old_value, new_value); + } +} + +void queue_fill( + std::vector>& grid, + CartesianIndex loc, + float old_value, + float new_value) { + if (old_value == new_value) { + return; + } + + auto q = std::queue{}; + q.push(loc); + const auto x = static_cast(loc[0]); + const auto y = static_cast(loc[1]); + grid[x][y] = new_value; + + while (q.size() > 0) { + const auto current_loc = q.front(); + q.pop(); + const auto possible_neighbors = + find_neighbors(grid, current_loc, old_value, new_value); + for (auto const& neighbor : possible_neighbors) { + const auto neighbor_x = static_cast(neighbor[0]); + const auto neighbor_y = static_cast(neighbor[1]); + grid[neighbor_x][neighbor_y] = new_value; + q.push(neighbor); + } + } +} + +void stack_fill( + std::vector>& grid, + CartesianIndex loc, + float old_value, + float new_value) { + if (old_value == new_value) { + return; + } + + auto s = std::stack{}; + s.push(loc); + + while (s.size() > 0) { + const auto current_loc = s.top(); + s.pop(); + + const auto x = static_cast(current_loc[0]); + const auto y = static_cast(current_loc[1]); + + if (grid[x][y] == old_value) { + grid[x][y] = new_value; + const auto possible_neighbors = + find_neighbors(grid, current_loc, old_value, new_value); + for (auto const& neighbor : possible_neighbors) { + s.push(neighbor); + } + } + } +} + +int main() { + + const std::vector> grid{ + {0, 0, 1, 0, 0}, + {0, 0, 1, 0, 0}, + {0, 0, 1, 0, 0}, 
+ {0, 0, 1, 0, 0}, + {0, 0, 1, 0, 0}}; + + const std::vector> solution_grid{ + {1, 1, 1, 0, 0}, + {1, 1, 1, 0, 0}, + {1, 1, 1, 0, 0}, + {1, 1, 1, 0, 0}, + {1, 1, 1, 0, 0}}; + + const CartesianIndex start_loc{1, 1}; + + auto test_grid = grid; + recursive_fill(test_grid, start_loc, 0.0, 1.0); + assert(test_grid == solution_grid); + + test_grid = grid; + queue_fill(test_grid, start_loc, 0.0, 1.0); + assert(test_grid == solution_grid); + + test_grid = grid; + stack_fill(test_grid, start_loc, 0.0, 1.0); + assert(test_grid == solution_grid); + + return EXIT_SUCCESS; +} \ No newline at end of file diff --git a/contents/flood_fill/code/julia/flood_fill.jl b/contents/flood_fill/code/julia/flood_fill.jl new file mode 100644 index 000000000..bc42ac9d0 --- /dev/null +++ b/contents/flood_fill/code/julia/flood_fill.jl @@ -0,0 +1,139 @@ +using DataStructures +using Test + +# Function to check to make sure we are on the canvas +function inbounds(canvas_size, loc) + + # Make sure we are not beneath or to the left of the canvas + if minimum(Tuple(loc)) < 1 + return false + + # Make sure we are not to the right of the canvas + elseif loc[2] > canvas_size[2] + return false + + # Make sure we are not above the canvas + elseif loc[1] > canvas_size[1] + return false + else + return true + end +end + +function find_neighbors(canvas, loc::CartesianIndex, old_val, new_val) + + # Finding north, south, east, west neighbors + possible_neighbors = [loc + CartesianIndex(0, 1), + loc + CartesianIndex(1, 0), + loc + CartesianIndex(0, -1), + loc + CartesianIndex(-1, 0)] + + # Exclusing neighbors that should not be colored + neighbors = [] + for possible_neighbor in possible_neighbors + if inbounds(size(canvas), possible_neighbor) && + canvas[possible_neighbor] == old_val + push!(neighbors, possible_neighbor) + end + end + + return neighbors +end + +function stack_fill!(canvas, loc::CartesianIndex, old_val, new_val) + if new_val == old_val + return + end + + s = Stack{CartesianIndex}() + push!(s, 
loc) + + while length(s) > 0 + current_loc = pop!(s) + if canvas[current_loc] == old_val + canvas[current_loc] = new_val + possible_neighbors = find_neighbors(canvas, current_loc, + old_val, new_val) + for neighbor in possible_neighbors + push!(s,neighbor) + end + end + + end +end + + +function queue_fill!(canvas, loc::CartesianIndex, old_val, new_val) + if new_val == old_val + return + end + + q = Queue{CartesianIndex}() + enqueue!(q, loc) + + # Coloring the initial location + canvas[loc] = new_val + + while length(q) > 0 + current_loc = dequeue!(q) + + possible_neighbors = find_neighbors(canvas, current_loc, + old_val, new_val) + + # Coloring as we are enqueuing neighbors + for neighbor in possible_neighbors + canvas[neighbor] = new_val + enqueue!(q,neighbor) + end + + end +end + +function recursive_fill!(canvas, loc::CartesianIndex, old_val, new_val) + + if (old_val == new_val) + return + end + + canvas[loc] = new_val + + possible_neighbors = find_neighbors(canvas, loc, old_val, new_val) + for possible_neighbor in possible_neighbors + recursive_fill!(canvas, possible_neighbor, old_val, new_val) + end +end + +function main() + + # Creation of a 5x5 grid with a single row of 1.0 elements + grid = zeros(5,5) + grid[3,:] .= 1 + + # Create solution grid + answer_grid = zeros(5,5) + answer_grid[1:3, :] .= 1 + + # Start filling at 1,1 + start_loc = CartesianIndex(1,1) + + @testset "Fill Methods" begin + # Use recursive method and reinitialize grid + recursive_fill!(grid, start_loc, 0.0, 1.0) + @test grid == answer_grid + + grid[1:2,:] .= 0 + + # Use queue method and reinitialize grid + queue_fill!(grid, start_loc, 0.0, 1.0) + @test grid == answer_grid + + grid[1:2,:] .= 0 + + # Use stack method and reinitialize grid + stack_fill!(grid, start_loc, 0.0, 1.0) + @test grid == answer_grid + end + +end + +main() diff --git a/contents/flood_fill/code/python/flood_fill.py b/contents/flood_fill/code/python/flood_fill.py new file mode 100644 index 000000000..5b346fa39 --- 
/dev/null +++ b/contents/flood_fill/code/python/flood_fill.py @@ -0,0 +1,89 @@ +from collections import namedtuple +from queue import Queue +import numpy as np + +Point = namedtuple("Point", "x y") + +def inbounds(canvas_shape, p): + return min(p) >= 0 and p.x < canvas_shape[0] and p.y < canvas_shape[1] + +def find_neighbors(canvas, p, old_val, new_val): + # north, south, east, west neighbors + possible_neighbors = [ + Point(p.x, p.y+1), + Point(p.x+1, p.y), + Point(p.x-1, p.y), + Point(p.x, p.y-1) + ] + + # exclude the neighbors that go out of bounds and should not be colored + neighbors = [] + for possible_neighbor in possible_neighbors: + if inbounds(canvas.shape, possible_neighbor): + if canvas[possible_neighbor] == old_val: + neighbors.append(possible_neighbor) + return neighbors + +def stack_fill(canvas, p, old_val, new_val): + if old_val == new_val: + return + + stack = [p] + + while stack: + cur_loc = stack.pop() + canvas[cur_loc] = new_val + stack += find_neighbors(canvas, cur_loc, old_val, new_val) + +def queue_fill(canvas, p, old_val, new_val): + if old_val == new_val: + return + + q = Queue() + q.put(p) + + canvas[p] = new_val + + while not q.empty(): + cur_loc = q.get() + neighbors = find_neighbors(canvas, cur_loc, old_val, new_val) + + for neighbor in neighbors: + canvas[neighbor] = new_val + q.put(neighbor) + +def recursive_fill(canvas, p, old_val, new_val): + if old_val == new_val: + return + + canvas[p] = new_val + + neighbors = find_neighbors(canvas, p, old_val, new_val) + for neighbor in neighbors: + recursive_fill(canvas, neighbor, old_val, new_val) + +def main(): + grid = np.zeros((5, 5)) + grid[2,:] = 1 + + answer = np.zeros((5, 5)) + answer[:3,] = 1 + + c0 = grid.copy() + c1 = grid.copy() + c2 = grid.copy() + + start_loc = Point(0, 0) + + recursive_fill(c0, start_loc, 0, 1) + queue_fill(c1, start_loc, 0, 1) + stack_fill(c2, start_loc, 0, 1) + + assert (c0 == answer).all() + assert (c1 == answer).all() + assert (c2 == answer).all() + + 
print("Tests Passed") + +if __name__ == "__main__": + main() diff --git a/contents/flood_fill/flood_fill.md b/contents/flood_fill/flood_fill.md new file mode 100644 index 000000000..ed84683a6 --- /dev/null +++ b/contents/flood_fill/flood_fill.md @@ -0,0 +1,300 @@ +# Flood Fill + +Flood fill is a method that is surprisingly useful in a large number of different situations and keeps finding me wherever I go. +When I was completing my PhD, I had an idea to track superfluid vortices by using flood fill as a way to help mask out unnecessary features of the simulation. +When I was making a terminal game, I thought of creating an animation that was just flood fill in disguise. +When I decided to play minesweeper or Go with my girlfriend, flood fill was used in both! + +Flood fill is probably most commonly known as the "Bucket Fill" application in most art programs {{ "gimp_bucket" | cite }}. +It's usually indicated by an icon that looks like a bucket and is known to fill in any enclosed area, as shown below: + +

+ +

+
+Because flood fill is incredibly common, there are a large number of variations to the method, some of which are more optimal than others.
+For this chapter, we will cover the basics: how to fill a domain in a quick and dirty way.
+In subsequent chapters, we will continue our journey by creating more and more efficient flood fill methods, including scanline-based and fixed memory methods {{ "torbert2016" | cite }}.
+
+I have decided to split the chapter up for a few important reasons:
+1. I did not want to flood the Algorithm Archive with flood fill methods all at the same time.
+I feel it's worth letting each chapter sit for a bit while we savor its unique flavor.
+2. Many users are implementing versions of each algorithm in their own languages and it is difficult to review and submit code for chapters with a lot of code chunks.
+Several sub-chapters with less code is easier for everyone.
+3. I am kinda under a time-constraint right now and wanted to make sure we regularly get content into the Algorithm Archive.
+
+So, without further ado, let's hop right into it!
+
+
+## What does flood fill do?
+
+Flood fill is essentially composed of 2 parts:
+1. Determining the extents of the domain to fill
+2. Walking through all elements within a domain and changing some property
+
+For the purposes of this chapter, we will be using a set of floating-point values that range from 0 to 1 instead of a color-space like RGB.
+Though bucket fill is always used in art programs in some sort of color space, flood fill is more general and can be used in a space with any type of element.
+As such, it makes sense to use a simpler element type so we can better understand the method.
+
+So how do we go about finding the extents of the domain to fill?
+
+Here, a domain will be defined as any connected set of elements in an $$n$$-dimensional space whose values do not vary beyond a predefined threshold.
+As an example, if we take a circle embedded into a 2-dimensional grid, we have 3 separate domains:
+1. Inside the circle where all elements are 0.
+2. The circle itself, where the elements are set to 0.75.
+3. Outside the circle where all elements are similarly 0.
+
+

+ +

+ +Though there are some more complicated ways to determine the extents of the domain, we will not focus on this aspect of the flood fill method for the remainder of this chapter and instead leave it for subsequent chapters. +So now we will focus on the process of walking through each element in the domain and changing some property. + +## Domain traversal + +As before, the simplest example to work with is that of an image, where each element in our domain is a single pixel. +Here, we can connect each pixel to all other pixels in its vicinity, like so: + +

+ +

+ +In this image, a border is shown between each individual pixel and a grid is superimposed to show how each pixel is connected to its neighbors. +This means that each element has 4 neighbors: north, south, east, and west. +We could also include northeast, southeast, southwest, and northwest if we wanted to do an 8-way fill, but we will restrict the discussion to the 4-way fill for now, as the method is essentially the same and slightly easier to understand with fewer elements to worry about. + +By connecting each pixel to its neighbors in this way, the flood fill operation becomes a process of graph traversal, not too dissimilar from the [tree traversal](../tree_traversal/tree_traversal.md) methods described before. +This means that after selecting our initial location, we can then traverse through all elements in either a depth-first or breadth-first fashion. +We will be covering the following this chapter: + +1. Finding all neighbors +2. Depth-first node traversal +3. Breadth-first node traversal and small-scale optimizations + +So let's start by discussing how we might go about finding the neighbors to fill. + +### Finding all neighbors + +The first step of this method is to query the location of all possible neighbors. +At first glance, this seems rather straightforward. +One simply needs to look up, down, left, and right of the current location and add those elements to the list of neighbors if they are: + +1. On the canvas +2. 
Have a value *close enough* to the old value we would like to replace + +In code, this might look like this: + +{% method %} +{% sample lang="jl" %} +[import:23-41, lang:"julia"](code/julia/flood_fill.jl) +{% sample lang="c" %} +[import:28-46, lang:"c"](code/c/flood_fill.c) +{% sample lang="cpp" %} +[import:19-44, lang:"cpp"](code/cpp/flood_fill.cpp) +{% sample lang="py" %} +[import:10-25, lang="python"](code/python/flood_fill.py) +{% sample lang="coco" %} +[import:15-20, lang="coconut"](code/coconut/flood_fill.coco) +{% endmethod %} + + +This code is set up to return a vector of elements to then use for subsequent sections. + +### Depth-first node traversal + +Now that we have the ability to find all neighboring elements, we can proceed to traverse through those nodes in the most straightforward way: recursion. + +In code, it might look like this: + +{% method %} +{% sample lang="jl" %} +[import:92-104, lang:"julia"](code/julia/flood_fill.jl) +{% sample lang="c" %} +[import:174-189, lang:"c"](code/c/flood_fill.c) +{% sample lang="cpp" %} +[import:46-64, lang:"cpp"](code/cpp/flood_fill.cpp) +{% sample lang="py" %} +[import:55-63, lang="python"](code/python/flood_fill.py) +{% sample lang="coco" %} +[import:52-61, lang:"coconut"](code/coconut/flood_fill.coco) +{% endmethod %} + +The above code continues recursing through available neighbors as long as neighbors exist, and this should work so long as we are adding the correct set of neighbors. 
+ +Additionally, it is possible to do the same type of traversal by managing a stack, like so: + +{% method %} +{% sample lang="jl" %} +[import:43-63, lang:"julia"](code/julia/flood_fill.jl) +{% sample lang="c" %} +[import:79-102, lang:"c"](code/c/flood_fill.c) +{% sample lang="cpp" %} +[import:95-123, lang:"cpp"](code/cpp/flood_fill.cpp) +{% sample lang="py" %} +[import:27-36, lang="python"](code/python/flood_fill.py) +{% sample lang="coco" %} +[import:23-34, lang:"coconut"](code/coconut/flood_fill.coco) +{% endmethod %} + +This is ultimately the same method of traversal as before; however, because we are managing our own data structure, there are a few distinct differences: +1. The manually managed stack could be slightly slower and potentially more memory-intensive +2. It is easy to reach the maximum recursion depth on certain hardware with the recursive method, so it is best to use the stack-based implementation in those cases. + +If we were to use either of these methods to fill a circle embedded in a two dimensional domain, we would see the following + +
+ +
+ +Here, we see that these methods will traverse through one direction first before filling from there. +This is potentially the easiest method to write, but it is not the most intuitive fill pattern. +I suspect that if someone was asked to fill the contents of the circle on their own, they would fill it more evenly from the center, like so: + +
+ +
+ +This is simply another traversal strategy known as breadth-first traversal and comes with its own set of caveats. +We will discuss this further in the next subsection + +### Breadth-first node traversal and small-scale optimizations + +Breadth-first node traversal is as simple as switching the stack in the depth-first strategy with a queue. +The code would look something like this: + +{% method %} +{% sample lang="jl" %} +[import:66-90, lang:"julia"](code/julia/flood_fill.jl) +{% sample lang="c" %} +[import:149-172, lang:"c"](code/c/flood_fill.c) +{% sample lang="cpp" %} +[import:66-93, lang:"cpp"](code/cpp/flood_fill.cpp) +{% sample lang="py" %} +[import:38-53, lang="python"](code/python/flood_fill.py) +{% sample lang="coco" %} +[import:36-49, lang:"coconut"](code/coconut/flood_fill.coco) +{% endmethod %} + +Now, there is a small trick in this code that must be considered to make sure it runs optimally. +Namely, the nodes must be colored *when they are being enqueued*, not when visiting the node. +At least for me, it was not immediately obvious why this would be the case, but let me try to explain. + +Let's imagine that we decided to write code that colored all neighboring nodes only when visiting them. +When querying all possible neighbors, we will add 4 elements to the queue for the north, south, east, and west neighbors of the initial node, as shown below: + +

+ +

+ +Now let's imagine we travel east first. +It then enqueues three more nodes: north, south, and east again. +This is shown below: + +

+ +

+ +It does not enqueue its west neighbor because this has already been colored. +At this stage, we will have six nodes ready to be colored and 2 that are already colored. +Now let's say we travel north next. +This node will enqueue three more nodes: west, north, and east, as shown below: + +

+ +

+The problem is that the east element has *already been enqueued for coloring by the previous node*!
+This shared element is colored in red.
+As we progress through all four initial neighbors, we will find 4 nodes that are doubly enqueued: all directions diagonal to the initial location!
+This is again shown below:
+
+

+ +

+As the number of nodes increases, so does the number of duplicate nodes.
+A quick fix is to color the nodes *when they are being enqueued* like in the example code above.
+When doing this, duplicates will not be enqueued with a breadth-first scheme because they will already be colored when other nodes are trying to find their neighbors.
+This creates a node connection pattern like so:
+
+

+ +

+ +As some final food for thought: why wasn't this a problem with the depth-first strategy? +The simple answer is that it actually was an issue, but it was way less prevalent. +With the depth-first strategy, a number of unnecessary nodes are still pushed to the stack, but because we consistently push one direction before spreading out to other directions, it is more likely that the nodes have filled neighbors when they are looking for what to fill around them. + +Simply put: depth-first traversal is slightly more efficient in this case unless you can color as querying for neighbors, in which case breadth-first is more efficient. + +## Conclusions + +As stated before, the method discussed in this chapter is just the tip of the iceberg and many other flood fill methods exist that are likely to be more efficient for most purposes. +These will all be covered in subsequent chapters which will come out somewhat regularly throughout the next few months, lest we flood that archive with flood fill methods. + +## Video Explanation + +Here is a video describing tree traversal: + +
+ +
+ +## Example Code + +The example code for this chapter will be the simplest application of flood fill that still adequately tests the code to ensure it is stopping at boundaries appropriately. +For this, we will create a two dimensional array of floats, all starting at 0.0, and then set a single vertical line of elements at the center to be 1.0. +After, we will fill in the left-hand side of the array to be all ones by choosing any point within the left domain to fill. + +{% method %} +{% sample lang="jl" %} +[import, lang:"julia"](code/julia/flood_fill.jl) +{% sample lang="c" %} +[import, lang:"c"](code/c/flood_fill.c) +{% sample lang="cpp" %} +[import, lang:"cpp"](code/cpp/flood_fill.cpp) +{% sample lang="py" %} +[import:, lang="python"](code/python/flood_fill.py) +{% sample lang="coco" %} +[import, lang="coconut"](code/coconut/flood_fill.coco) +{% endmethod %} + + +### Bibliography + +{% references %} {% endreferences %} + + + +## License + +##### Code Examples + +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). + +##### Text + +The text of this chapter was written by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +[

](https://creativecommons.org/licenses/by-sa/4.0/) + +##### Images/Graphics +- The image "[Example Bucket Fill](res/example.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Circle Domains](res/simple_circle.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Grid 1](res/grid_1.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Grid 2](res/grid_2.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Grid 3](res/grid_3.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Grid 4](res/grid_4.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The image "[Grid 5](res/grid_5.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). 
+- The image "[Grid 6](res/grid_6.png)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Stack Fill](res/recurse_animation.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The video "[Queue Fill](res/queue_animation.mp4)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). diff --git a/contents/flood_fill/res/example.png b/contents/flood_fill/res/example.png new file mode 100644 index 000000000..7b330c20e Binary files /dev/null and b/contents/flood_fill/res/example.png differ diff --git a/contents/flood_fill/res/flood_fill.svg b/contents/flood_fill/res/flood_fill.svg new file mode 100644 index 000000000..59146f036 --- /dev/null +++ b/contents/flood_fill/res/flood_fill.svg @@ -0,0 +1,23 @@ + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + diff --git a/contents/flood_fill/res/grid_1.png b/contents/flood_fill/res/grid_1.png new file mode 100644 index 000000000..4d47e26ba Binary files /dev/null and b/contents/flood_fill/res/grid_1.png differ diff --git a/contents/flood_fill/res/grid_2.png b/contents/flood_fill/res/grid_2.png new file mode 100644 index 000000000..d6f2e5f5b Binary files /dev/null and b/contents/flood_fill/res/grid_2.png differ diff --git a/contents/flood_fill/res/grid_3.png b/contents/flood_fill/res/grid_3.png new file mode 100644 index 000000000..c561d07a1 Binary files /dev/null and b/contents/flood_fill/res/grid_3.png differ diff --git a/contents/flood_fill/res/grid_4.png b/contents/flood_fill/res/grid_4.png new file mode 100644 index 
000000000..b7cded182 Binary files /dev/null and b/contents/flood_fill/res/grid_4.png differ diff --git a/contents/flood_fill/res/grid_5.png b/contents/flood_fill/res/grid_5.png new file mode 100644 index 000000000..65162503f Binary files /dev/null and b/contents/flood_fill/res/grid_5.png differ diff --git a/contents/flood_fill/res/grid_6.png b/contents/flood_fill/res/grid_6.png new file mode 100644 index 000000000..b6b3c0872 Binary files /dev/null and b/contents/flood_fill/res/grid_6.png differ diff --git a/contents/flood_fill/res/queue_animation.mp4 b/contents/flood_fill/res/queue_animation.mp4 new file mode 100644 index 000000000..e0386f14d Binary files /dev/null and b/contents/flood_fill/res/queue_animation.mp4 differ diff --git a/contents/flood_fill/res/recurse_animation.mp4 b/contents/flood_fill/res/recurse_animation.mp4 new file mode 100644 index 000000000..0185be25a Binary files /dev/null and b/contents/flood_fill/res/recurse_animation.mp4 differ diff --git a/contents/flood_fill/res/simple_circle.png b/contents/flood_fill/res/simple_circle.png new file mode 100644 index 000000000..2b69fdaac Binary files /dev/null and b/contents/flood_fill/res/simple_circle.png differ diff --git a/contents/fortran/fortran.md b/contents/fortran/fortran.md index f705d3c17..491a33b6e 100644 --- a/contents/fortran/fortran.md +++ b/contents/fortran/fortran.md @@ -6,7 +6,7 @@ Alright, so here's the thing about Fortran. It's old. ##### Code Examples -The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). 
##### Text diff --git a/contents/forward_euler_method/code/asm-x64/SConscript b/contents/forward_euler_method/code/asm-x64/SConscript new file mode 100644 index 000000000..2a10fbc14 --- /dev/null +++ b/contents/forward_euler_method/code/asm-x64/SConscript @@ -0,0 +1,6 @@ +Import('files_to_compile env') + +for file_info in files_to_compile: + build_target = f'#/build/{file_info.language}/{file_info.chapter}/{file_info.path.stem}' + build_result = env.X64(build_target, str(file_info.path), LIBS='m', LINKFLAGS='-no-pie') + env.Alias(str(file_info.chapter), build_result) \ No newline at end of file diff --git a/contents/forward_euler_method/code/c/SConscript b/contents/forward_euler_method/code/c/SConscript new file mode 100644 index 000000000..b81220a0e --- /dev/null +++ b/contents/forward_euler_method/code/c/SConscript @@ -0,0 +1,6 @@ +Import('files_to_compile env') + +for file_info in files_to_compile: + build_target = f'#/build/{file_info.language}/{file_info.chapter}/{file_info.path.stem}' + build_result = env.C(build_target, str(file_info.path), LIBS='m') + env.Alias(str(file_info.chapter), build_result) \ No newline at end of file diff --git a/contents/forward_euler_method/code/c/euler.c b/contents/forward_euler_method/code/c/euler.c index 9ce095930..211e2f614 100644 --- a/contents/forward_euler_method/code/c/euler.c +++ b/contents/forward_euler_method/code/c/euler.c @@ -13,7 +13,7 @@ void solve_euler(double timestep, double *result, size_t n) { int check_result(double *result, size_t n, double threshold, double timestep) { int is_approx = 1; for (size_t i = 0; i < n; ++i) { - double solution = exp(-3.0 * i * timestep); + double solution = exp(-3.0 * (double)i * timestep); if (fabs(result[i] - solution) > threshold) { printf("%f %f\n", result[i], solution); is_approx = 0; diff --git a/contents/forward_euler_method/code/clisp/euler.lisp b/contents/forward_euler_method/code/clisp/euler.lisp new file mode 100644 index 000000000..04d7b6749 --- /dev/null +++ 
b/contents/forward_euler_method/code/clisp/euler.lisp @@ -0,0 +1,29 @@ +;;;; Forward euler implementation in Common Lisp + +(defun solve-euler (timestep n) + "Returns a function where y'(t) = -3t and y(0) = 0 using the forward euler method" + (loop + with result = (make-array n :initial-element 1) + for i from 1 upto (1- n) do + (setf (svref result i) (- (svref result (1- i)) (* 3 (svref result (1- i)) timestep))) + finally (return result))) + +(defun approximatep (result threshold timestep) + "Checks the result from the solve-euler function" + (loop + with approximatep = t + with solution = 0 + for i from 0 upto (1- (length result)) do + (setf solution (exp (* (- 3) i timestep))) + (when (> (- (svref result i) solution) threshold) + (setf approximatep nil) + (format t "~d ~d~%" (svref result i) solution)) + finally (return approximatep))) + +(defvar timestep 0.01) +(defvar n 100) ; number of steps +(defvar threshold 0.01) + +(defvar result (solve-euler timestep n)) +(defvar approximatep (approximatep result threshold timestep)) +(format t "~:[Value(s) not in threshold~;All values within threshold~]~%" approximatep) diff --git a/contents/forward_euler_method/code/coconut/euler.coco b/contents/forward_euler_method/code/coconut/euler.coco new file mode 100644 index 000000000..7297e9c51 --- /dev/null +++ b/contents/forward_euler_method/code/coconut/euler.coco @@ -0,0 +1,28 @@ +import math + +def forward_euler(time_step, n): + factors = [1] + [1 - 3 * time_step] * (n-1) + # We want all the cumulative values, thus the use of scan + return scan((*), factors) + + + +def check(result, threshold, time_step): + approx = True + # A scan object has a len if the underlying iterable has a len + solution = range(len(result)) |> map$(i -> math.exp(-3*i*time_step)) + for y, sol in zip(result, solution): + if not math.isclose(y, sol, abs_tol=threshold): + print(y, sol) + approx = False + return approx + + +if __name__ == '__main__': + time_step = 0.01 + n = 100 + threshold = 0.01 + 
+ result = forward_euler(time_step, n) + approx = check(result, threshold, time_step) + print("All values within threshold") if approx else print("Value(s) not in threshold") diff --git a/contents/forward_euler_method/code/c++/euler.cpp b/contents/forward_euler_method/code/cpp/euler.cpp similarity index 95% rename from contents/forward_euler_method/code/c++/euler.cpp rename to contents/forward_euler_method/code/cpp/euler.cpp index a341655f4..0fbeb8426 100644 --- a/contents/forward_euler_method/code/c++/euler.cpp +++ b/contents/forward_euler_method/code/cpp/euler.cpp @@ -27,7 +27,7 @@ template bool check_result(Iter first, Iter last, double threshold, double timestep) { auto it = first; for (size_t idx = 0; it != last; ++idx, ++it) { - double solution = std::exp(-3.0 * idx * timestep); + double solution = std::exp(-3.0 * static_cast(idx) * timestep); if (std::abs(*it - solution) > threshold) { std::cout << "We found a value outside the threshold; the " << idx << "-th value was " << *it << ", but the expected solution was " diff --git a/contents/forward_euler_method/code/elm/elm-package.json b/contents/forward_euler_method/code/elm/elm-package.json deleted file mode 100644 index fccb4595b..000000000 --- a/contents/forward_euler_method/code/elm/elm-package.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "version": "1.0.0", - "summary": "helpful summary of your project, less than 80 characters", - "repository": "https://github.com/user/project.git", - "license": "BSD3", - "source-directories": [ - "." 
- ], - "exposed-modules": [], - "dependencies": { - "CallumJHays/elm-sliders": "1.0.1 <= v < 2.0.0", - "elm-lang/core": "5.1.1 <= v < 6.0.0", - "elm-lang/html": "2.0.0 <= v < 3.0.0", - "elm-lang/mouse": "1.0.1 <= v < 2.0.0", - "elm-lang/svg": "2.0.0 <= v < 3.0.0", - "elm-lang/window": "1.0.1 <= v < 2.0.0", - "rtfeldman/hex": "1.0.0 <= v < 2.0.0" - }, - "elm-version": "0.18.0 <= v < 0.19.0" -} diff --git a/contents/forward_euler_method/code/elm/elm.json b/contents/forward_euler_method/code/elm/elm.json new file mode 100644 index 000000000..5eac761dd --- /dev/null +++ b/contents/forward_euler_method/code/elm/elm.json @@ -0,0 +1,28 @@ +{ + "type": "application", + "source-directories": [ + "src" + ], + "elm-version": "0.19.1", + "dependencies": { + "direct": { + "bemyak/elm-slider": "1.0.0", + "elm/browser": "1.0.2", + "elm/core": "1.0.5", + "elm/html": "1.0.0", + "elm/json": "1.1.3", + "elm/svg": "1.0.1", + "elm/time": "1.0.0", + "rtfeldman/elm-hex": "1.0.0" + }, + "indirect": { + "debois/elm-dom": "1.3.0", + "elm/url": "1.0.0", + "elm/virtual-dom": "1.0.2" + } + }, + "test-dependencies": { + "direct": {}, + "indirect": {} + } +} diff --git a/contents/forward_euler_method/code/elm/euler.elm b/contents/forward_euler_method/code/elm/euler.elm deleted file mode 100644 index a8d8d3c89..000000000 --- a/contents/forward_euler_method/code/elm/euler.elm +++ /dev/null @@ -1,332 +0,0 @@ -module Euler exposing (..) - -import Html exposing (Html, div, button, text, h3) -import Html.Attributes exposing (style) -import Html.Events exposing (onClick, on) -import Time exposing (Time, second) -import Maybe exposing (withDefault) -import Window exposing (Size, size) -import Svg exposing (svg, circle, line, polyline) -import Svg.Attributes exposing (width, height, stroke, x1, x2, y1, y2, cx, cy, r, points, fill) -import Task exposing (perform) -import Slider exposing (..) 
-import Mouse -import Json.Decode as Decode -import Hex - - -main = - Html.program - { init = init - , view = view - , update = update - , subscriptions = subscriptions - } - - - --- MODEL - - -type alias Model = - { part : Particle - , dt : Time - , dt0 : Time - , t : Time - , status : Status - , wWidth : Int - , wHeight : Int - , history : List ( Time, Time, Particle ) - , drag : Maybe Drag - } - - -type alias Position = - Float - - -type alias Velocity = - Float - - -type alias Particle = - { pos : List Position, vel : List Velocity } - - -type Status - = Idle - | Running - - -type alias Drag = - { start : Position - , current : Position - } - - -getX : Particle -> Position -getX p = - withDefault 0 <| List.head <| .pos p - - -getV : Particle -> Velocity -getV p = - withDefault 0 <| List.head <| .vel p - - -getX0 : Model -> Position -getX0 m = - let - scale x = - 3 - 6 * x / (toFloat m.wHeight) - in - case m.drag of - Nothing -> - getX m.part - - Just { start, current } -> - getX m.part + scale current - scale start - - - --- INIT - - -init : ( Model, Cmd Msg ) -init = - ( Model (Particle [ x0 ] [ 0 ]) 0.5 0.5 0 Idle 0 0 [] Nothing, perform GetSize size ) - - -x0 : Position -x0 = - 2.5 - - - --- UPDATE - - -type Msg - = Start - | Stop - | Tick Time - | GetSize Size - | SliderUpdate Float - | DragStart Mouse.Position - | DragAt Mouse.Position - | DragEnd Mouse.Position - - -update : Msg -> Model -> ( Model, Cmd Msg ) -update msg model = - case msg of - Start -> - ( { model - | status = Running - , t = 0 - , dt = model.dt0 - , drag = Nothing - } - , Cmd.none - ) - - Stop -> - ( { model - | status = Idle - , part = Particle [ x0 ] [ 0 ] - , t = 0 - } - , Cmd.none - ) - - Tick _ -> - case model.status of - Idle -> - ( model, Cmd.none ) - - Running -> - if model.t > 5 + model.dt then - ( { model - | status = Idle - , part = Particle [ x0 ] [ 0 ] - , history = ( model.dt, model.t, model.part ) :: model.history - , t = 0 - } - , Cmd.none - ) - else - ( { model - | part 
= evolve model.part model.t model.dt - , t = model.t + model.dt - } - , perform GetSize size - ) - - GetSize s -> - ( { model | wWidth = s.width, wHeight = s.height * 8 // 10 }, Cmd.none ) - - SliderUpdate dt -> - ( { model | dt0 = dt }, Cmd.none ) - - DragStart { x, y } -> - case model.status of - Idle -> - ( { model | drag = Just (Drag (toFloat y) (toFloat y)) }, Cmd.none ) - - Running -> - ( model, Cmd.none ) - - DragAt { x, y } -> - ( { model | drag = Maybe.map (\{ start } -> Drag start (toFloat y)) model.drag } - , Cmd.none - ) - - DragEnd _ -> - ( { model - | drag = Nothing - , part = Particle [ getX0 model ] [ k * getX0 model ] - } - , Cmd.none - ) - - -k : Float -k = - -2 - - -diffEq : Position -> Velocity -> Time -> Time -> ( Position, Velocity ) -diffEq x v t dt = - ( x + (k * x) * dt, k * (x + (k * x) * dt) ) - - -evolve : Particle -> Time -> Time -> Particle -evolve p t dt = - let - ( x, v ) = - diffEq (getX p) (getV p) t dt - in - { p | pos = x :: p.pos, vel = v :: p.vel } - - - --- SUBSCRIPTIONS - - -subscriptions : Model -> Sub Msg -subscriptions model = - case model.drag of - Nothing -> - Time.every (model.dt * second) Tick - - Just _ -> - Sub.batch [ Mouse.moves DragAt, Mouse.ups DragEnd ] - - - --- VIEW - - -view : Model -> Html Msg -view model = - div [] - [ h3 [] [ text "Drag the ball up or down, pick a dt and click Start" ] - , h3 [ style [ ( "color", gradient model.dt0 ) ] ] - [ viewSlider - , text ("dt = " ++ toString model.dt0) - , button [ onClick Start ] [ text "Start" ] - , button [ onClick Stop ] [ text "Stop" ] - ] - , svg - [ width (toString model.wWidth) - , height (toString model.wHeight) - , stroke "black" - ] - ([ line - [ x1 "0" - , x2 (toString model.wWidth) - , y1 (toString (model.wHeight // 2)) - , y2 (toString (model.wHeight // 2)) - ] - [] - , line - [ x1 (toString (model.wWidth // 20)) - , x2 (toString (model.wWidth // 20)) - , y1 "0" - , y2 (toString model.wHeight) - ] - [] - , viewCircle model - ] - ++ (plotHistory model) 
- ) - ] - - -viewSlider : Html Msg -viewSlider = - props2view [ MinVal 0, MaxVal 1, Step 0.01, onChange SliderUpdate ] - - -scaleX : Int -> Position -> String -scaleX h x = - toString (toFloat h / 2 * (1 - x / 3)) - - -scaleT : Int -> Time -> String -scaleT w t = - toString (toFloat w * (0.05 + t / 5)) - - -viewCircle : Model -> Html Msg -viewCircle m = - circle - [ cy (scaleX m.wHeight (getX0 m)) - , cx (scaleT m.wWidth m.t) - , r "10" - , on "mousedown" (Decode.map DragStart Mouse.position) - ] - [] - - -plotPath : Int -> Int -> ( Time, Time, Particle ) -> String -plotPath w h ( dt, tf, particle ) = - let - comb x ( t, s ) = - ( t - dt, s ++ (scaleT w t) ++ "," ++ (scaleX h x) ++ " " ) - in - Tuple.second <| List.foldl comb ( tf, "" ) particle.pos - - -plotHistory : Model -> List (Html Msg) -plotHistory m = - let - ( w, h ) = - ( m.wWidth, m.wHeight ) - in - List.map - (\( dt, t, p ) -> - polyline - [ stroke "black" - , fill "none" - , stroke (gradient dt) - , points (plotPath w h ( dt, t, p )) - ] - [] - ) - (( m.dt, m.t, m.part ) :: m.history) - - -gradient : Time -> String -gradient dt = - let - ( r, g, b ) = - ( round (255 * dt), 0, round (255 * (1 - dt)) ) - - col = - Hex.toString (256 * (256 * r + g) + b) - in - if String.length col < 6 then - "#" ++ String.repeat (6 - String.length col) "0" ++ col - else - "#" ++ col diff --git a/contents/forward_euler_method/code/elm/src/Euler.elm b/contents/forward_euler_method/code/elm/src/Euler.elm new file mode 100644 index 000000000..c9207d2de --- /dev/null +++ b/contents/forward_euler_method/code/elm/src/Euler.elm @@ -0,0 +1,397 @@ +module Euler exposing (..) 
+ +import Browser +import Browser.Dom exposing (Viewport) +import Browser.Events as Events +import Hex +import Html exposing (Html, button, div, h3, text) +import Html.Attributes exposing (style) +import Html.Events exposing (on, onClick) +import Json.Decode as Decode exposing (Decoder) +import Maybe +import SingleSlider as Slider +import Svg exposing (circle, line, polyline, svg) +import Svg.Attributes exposing (cx, cy, fill, height, points, r, stroke, width, x1, x2, y1, y2) +import Task +import Time exposing (Posix) + + +main : Platform.Program () Model Msg +main = + Browser.element + { init = \() -> init + , view = view + , update = update + , subscriptions = subscriptions + } + + + +-- MODEL + + +type alias Model = + { part : Particle + , dt : Time + , dt0 : Time + , t : Time + , status : Status + , wWidth : Float + , wHeight : Float + , history : List ( Time, Time, Particle ) + , drag : Maybe Drag + , slider : Slider.Model + } + + +x0 : Position +x0 = + 2.5 + + +init : ( Model, Cmd Msg ) +init = + ( { part = Particle [ x0 ] [ 0 ] + , dt = 0.25 + , dt0 = 0.25 + , t = 0 + , status = Idle + , wWidth = 0 + , wHeight = 0 + , history = [] + , drag = Nothing + , slider = + { min = 0 + , max = 1 + , step = 0.01 + , value = 0.25 + , minFormatter = \_ -> "" + , maxFormatter = \_ -> "" + , currentValueFormatter = \_ _ -> "" + , disabled = False + } + } + , Task.perform GetViewPort Browser.Dom.getViewport + ) + + +type alias Time = + Float + + +type alias Position = + Float + + +type alias Velocity = + Float + + +type alias Particle = + { pos : List Position, vel : List Velocity } + + +type Status + = Idle + | Running + + +type alias Drag = + { start : Float + , current : Float + } + + +getX : Particle -> Position +getX p = + Maybe.withDefault 0 <| List.head <| .pos p + + +getV : Particle -> Velocity +getV p = + Maybe.withDefault 0 <| List.head <| .vel p + + +getX0 : Model -> Position +getX0 m = + let + scale x = + 3 - 6 * x / m.wHeight + in + case m.drag of + Nothing -> 
+ getX m.part + + Just { start, current } -> + getX m.part + scale current - scale start + + +resetParticle : Particle -> Particle +resetParticle { pos, vel } = + case ( List.reverse pos, List.reverse vel ) of + ( x :: _, v :: _ ) -> + Particle [ x ] [ v ] + + _ -> + Particle [ x0 ] [ 0 ] + + + +-- UPDATE + + +type Msg + = Start + | Stop + | Tick Posix + | GetViewPort Viewport + | SliderUpdate Float + | SliderMsg Slider.Msg + | DragStart Float + | DragAt Float + | DragEnd Float + + +update : Msg -> Model -> ( Model, Cmd Msg ) +update msg model = + case msg of + Start -> + ( { model + | status = Running + , t = 0 + , dt = model.dt0 + , drag = Nothing + , part = resetParticle model.part + } + , Cmd.none + ) + + Stop -> + ( { model + | status = Idle + , part = resetParticle model.part + , t = 0 + } + , Cmd.none + ) + + Tick _ -> + case model.status of + Idle -> + ( model, Cmd.none ) + + Running -> + if model.t > 5 + model.dt then + ( { model + | status = Idle + , part = Particle [ x0 ] [ 0 ] + , history = ( model.dt, model.t, model.part ) :: model.history + , t = 0 + } + , Cmd.none + ) + + else + ( { model + | part = evolve model.part model.t model.dt + , t = model.t + model.dt + } + , Task.perform GetViewPort Browser.Dom.getViewport + ) + + GetViewPort { viewport } -> + ( { model | wWidth = viewport.width, wHeight = viewport.height * 8 / 10 }, Cmd.none ) + + SliderUpdate dt -> + ( { model | dt0 = dt }, Cmd.none ) + + SliderMsg sliderMsg -> + let + ( newSlider, cmd, updateResults ) = + Slider.update sliderMsg model.slider + + newModel = + { model | slider = newSlider, dt0 = newSlider.value } + + newCmd = + if updateResults then + Cmd.batch [ Cmd.map SliderMsg cmd, Cmd.none ] + + else + Cmd.none + in + ( newModel, newCmd ) + + DragStart y -> + case model.status of + Idle -> + ( { model | drag = Just (Drag y y) }, Cmd.none ) + + Running -> + ( model, Cmd.none ) + + DragAt y -> + ( { model | drag = Maybe.map (\{ start } -> Drag start y) model.drag } + , Cmd.none + ) + + 
DragEnd _ -> + ( { model + | drag = Nothing + , part = Particle [ getX0 model ] [ k * getX0 model ] + } + , Cmd.none + ) + + +k : Float +k = + -2 + + +diffEq : Position -> Velocity -> Time -> Time -> ( Position, Velocity ) +diffEq x _ _ dt = + ( x + (k * x) * dt, k * (x + (k * x) * dt) ) + + +evolve : Particle -> Time -> Time -> Particle +evolve p t dt = + let + ( x, v ) = + diffEq (getX p) (getV p) t dt + in + { p | pos = x :: p.pos, vel = v :: p.vel } + + + +-- SUBSCRIPTIONS + + +subscriptions : Model -> Sub Msg +subscriptions model = + (Slider.subscriptions model.slider |> Sub.map SliderMsg) + :: (case model.drag of + Nothing -> + [ Time.every (model.dt * 1000) Tick ] + + Just _ -> + [ Events.onMouseMove (Decode.map DragAt decodeMouseHeight) + , Events.onMouseUp (Decode.map DragEnd decodeMouseHeight) + ] + ) + |> Sub.batch + + +decodeMouseHeight : Decoder Float +decodeMouseHeight = + Decode.field "pageY" Decode.float + + + +-- VIEW + + +view : Model -> Html Msg +view model = + div [] + [ h3 [] [ text "Drag the ball up or down, pick a dt and click Start" ] + , h3 [ style "color" (gradient model.dt0) ] + [ viewSlider model.slider + , button [ onClick Start ] [ text "Start" ] + , button [ onClick Stop ] [ text "Stop" ] + , text ("dt = " ++ String.fromFloat model.dt0) + ] + , svg + [ width (String.fromFloat model.wWidth) + , height (String.fromFloat model.wHeight) + , stroke "black" + ] + ([ line + [ x1 "0" + , x2 (String.fromFloat model.wWidth) + , y1 (String.fromFloat (model.wHeight / 2)) + , y2 (String.fromFloat (model.wHeight / 2)) + ] + [] + , line + [ x1 (String.fromFloat (model.wWidth / 20)) + , x2 (String.fromFloat (model.wWidth / 20)) + , y1 "0" + , y2 (String.fromFloat model.wHeight) + ] + [] + , viewCircle model + ] + ++ plotHistory model + ) + ] + + +viewSlider : Slider.Model -> Html Msg +viewSlider slider = + Slider.view slider |> Html.map SliderMsg + + +scaleX : Float -> Position -> String +scaleX h x = + String.fromFloat (h / 2 * (1 - x / 3)) + + 
+scaleT : Float -> Time -> String +scaleT w t = + String.fromFloat (w * (0.05 + t / 5)) + + +viewCircle : Model -> Html Msg +viewCircle m = + circle + [ cy (scaleX m.wHeight (getX0 m)) + , cx (scaleT m.wWidth m.t) + , r "10" + , on "mousedown" (Decode.map DragStart decodeMouseHeight) + ] + [] + + +plotPath : Float -> Float -> ( Time, Time, Particle ) -> String +plotPath w h ( dt, tf, particle ) = + let + comb x ( t, s ) = + ( t - dt, s ++ scaleT w t ++ "," ++ scaleX h x ++ " " ) + in + Tuple.second <| List.foldl comb ( tf, "" ) particle.pos + + +plotHistory : Model -> List (Html Msg) +plotHistory m = + let + ( w, h ) = + ( m.wWidth, m.wHeight ) + in + List.map + (\( dt, t, p ) -> + polyline + [ stroke "black" + , fill "none" + , stroke (gradient dt) + , points (plotPath w h ( dt, t, p )) + ] + [] + ) + (( m.dt, m.t, m.part ) :: m.history) + + +gradient : Time -> String +gradient dt = + let + ( r, g, b ) = + ( round (255 * dt), 0, round (255 * (1 - dt)) ) + + col = + Hex.toString (256 * (256 * r + g) + b) + in + if String.length col < 6 then + "#" ++ String.repeat (6 - String.length col) "0" ++ col + + else + "#" ++ col diff --git a/contents/forward_euler_method/code/golang/euler.go b/contents/forward_euler_method/code/go/euler.go similarity index 100% rename from contents/forward_euler_method/code/golang/euler.go rename to contents/forward_euler_method/code/go/euler.go diff --git a/contents/forward_euler_method/code/v/euler.v b/contents/forward_euler_method/code/v/euler.v new file mode 100644 index 000000000..e590bbdcf --- /dev/null +++ b/contents/forward_euler_method/code/v/euler.v @@ -0,0 +1,37 @@ +import math + +fn forward_euler(timestep f64, n int) []f64 { + mut res := [f64(0.0)].repeat(n) + res[0] = f64(1) + for x := 1; x < n; x++ { + res[x] = res[x-1] - 3.0*res[x-1]*timestep + } + return res +} + +fn check(result []f64, threshold, timestep f64) bool { + mut approx := true + for x := 0; x < result.len; x++ { + solution := math.exp(-3.0 * f64(x) * timestep) + 
if math.abs(result[x]-solution) > threshold { + tmp := result[x] + println("There is a mismatch: abs($tmp-$solution) > $threshold!") + approx = false + } + } + return approx +} + +fn main() { + timestep := .01 + threshold := .01 + n := 100 + + result := forward_euler(timestep, n) + + if check(result, threshold, timestep) { + println("All values within threshold") + } else { + println("Value(s) not within threshold") + } +} \ No newline at end of file diff --git a/contents/forward_euler_method/forward_euler_method.md b/contents/forward_euler_method/forward_euler_method.md index 39a2f5821..f2f21c8b9 100644 --- a/contents/forward_euler_method/forward_euler_method.md +++ b/contents/forward_euler_method/forward_euler_method.md @@ -96,7 +96,7 @@ That said, variations of this method *are* certainly used (for example Crank-Nic Here is a video describing the forward Euler method:
- +
## Example Code @@ -112,15 +112,15 @@ Note that in this case, the velocity is directly given by the ODE and the accele {% sample lang="c" %} [import, lang:"c"](code/c/euler.c) {% sample lang="cpp" %} -[import, lang:"cpp"](code/c++/euler.cpp) +[import, lang:"cpp"](code/cpp/euler.cpp) {% sample lang="rs" %} [import, lang:"rust"](code/rust/euler.rs) {% sample lang="elm" %} -[import:44-54, lang:"elm"](code/elm/euler.elm) -[import:193-210, lang:"elm"](code/elm/euler.elm) +[import:78-91, lang:"elm"](code/elm/src/Euler.elm) +[import:236-252, lang:"elm"](code/elm/src/Euler.elm) Full code for the visualization follows: -[import, lang:"elm"](code/elm/euler.elm) +[import, lang:"elm"](code/elm/src/Euler.elm) {% sample lang="py" %} [import, lang:"python"](code/python/euler.py) @@ -135,13 +135,19 @@ Full code for the visualization follows: {% sample lang="f90" %} [import, lang:"fortran"](code/fortran/euler.f90) {% sample lang="go" %} -[import, lang:"go"](code/golang/euler.go) +[import, lang:"go"](code/go/euler.go) +{% sample lang="v" %} +[import, lang:"v"](code/v/euler.v) {% sample lang="asm-x64" %} [import, lang:"asm-x64"](code/asm-x64/euler.s) {% sample lang="java" %} [import, lang:"java"](code/java/ForwardEuler.java) {% sample lang="nim" %} [import, lang:"nim"](code/nim/forwardeuler.nim) +{% sample lang="lisp" %} +[import, lang="lisp"](code/clisp/euler.lisp) +{%sample lang="coco" %} +[import, lang:"coconut"](code/coconut/euler.coco) {% endmethod %} + +### Bibliography + +{% references %} {% endreferences %} + +## License + +##### Code Examples + +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). + + +##### Images/Graphics +- The animation "[Animated Random Walk](res/animated_random_walk.gif)" was created by [K. 
Shudipto Amin](https://github.com/shudipto-amin) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +- The animation "[Animated Metropolis](res/animated_metropolis.gif)" was created by [K. Shudipto Amin](https://github.com/shudipto-amin) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +- The image "[Plot of P(x)](res/plot_of_P.png)" was created by [K. Shudipto Amin](https://github.com/shudipto-amin) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +- The image "[Multiple Histograms](res/multiple_histograms.png)" was created by [K. Shudipto Amin](https://github.com/shudipto-amin) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +##### Text + +The text of this chapter was written by [K. Shudipto Amin](https://github.com/shudipto-amin) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +[

](https://creativecommons.org/licenses/by-sa/4.0/) + +##### Pull Requests + +After initial licensing ([#560](https://github.com/algorithm-archivists/algorithm-archive/pull/560)), the following pull requests have modified the text or graphics of this chapter: +- none diff --git a/contents/metropolis/res/1D_particles.png b/contents/metropolis/res/1D_particles.png new file mode 100644 index 000000000..3d8ab7d99 Binary files /dev/null and b/contents/metropolis/res/1D_particles.png differ diff --git a/contents/metropolis/res/animated_metropolis.gif b/contents/metropolis/res/animated_metropolis.gif new file mode 100644 index 000000000..93356bb7e Binary files /dev/null and b/contents/metropolis/res/animated_metropolis.gif differ diff --git a/contents/metropolis/res/animated_metropolis.mp4 b/contents/metropolis/res/animated_metropolis.mp4 new file mode 100644 index 000000000..68bfc36ec Binary files /dev/null and b/contents/metropolis/res/animated_metropolis.mp4 differ diff --git a/contents/metropolis/res/animated_random_walk.gif b/contents/metropolis/res/animated_random_walk.gif new file mode 100644 index 000000000..f80bc11b0 Binary files /dev/null and b/contents/metropolis/res/animated_random_walk.gif differ diff --git a/contents/metropolis/res/animated_random_walk.mp4 b/contents/metropolis/res/animated_random_walk.mp4 new file mode 100644 index 000000000..d7825ec3c Binary files /dev/null and b/contents/metropolis/res/animated_random_walk.mp4 differ diff --git a/contents/metropolis/res/multiple_histograms.png b/contents/metropolis/res/multiple_histograms.png new file mode 100644 index 000000000..9180253bc Binary files /dev/null and b/contents/metropolis/res/multiple_histograms.png differ diff --git a/contents/metropolis/res/plot_of_P.png b/contents/metropolis/res/plot_of_P.png new file mode 100644 index 000000000..3e4f3d10b Binary files /dev/null and b/contents/metropolis/res/plot_of_P.png differ diff --git a/contents/monte_carlo_integration/code/asm-x64/monte_carlo.s 
b/contents/monte_carlo_integration/code/asm-x64/monte_carlo.s index 61355cc95..4fb6d93aa 100644 --- a/contents/monte_carlo_integration/code/asm-x64/monte_carlo.s +++ b/contents/monte_carlo_integration/code/asm-x64/monte_carlo.s @@ -84,5 +84,6 @@ main: call printf add rsp, 16 pop rbp + xor rax, rax # Set exit code to 0 ret diff --git a/contents/monte_carlo_integration/code/c/monte_carlo.c b/contents/monte_carlo_integration/code/c/monte_carlo.c index 9920ff55c..14f823fe4 100644 --- a/contents/monte_carlo_integration/code/c/monte_carlo.c +++ b/contents/monte_carlo_integration/code/c/monte_carlo.c @@ -24,7 +24,7 @@ double monte_carlo(unsigned int samples) { } int main() { - srand(time(NULL)); + srand((unsigned int)time(NULL)); double estimate = monte_carlo(1000000); diff --git a/contents/monte_carlo_integration/code/clojure/monte_carlo.clj b/contents/monte_carlo_integration/code/clojure/monte_carlo.clj index f66baef67..de517e56c 100644 --- a/contents/monte_carlo_integration/code/clojure/monte_carlo.clj +++ b/contents/monte_carlo_integration/code/clojure/monte_carlo.clj @@ -8,9 +8,11 @@ (map #(* % %)) (reduce +)) (* r r))) + (defn rand-point [r] "return a random point from (0,0) inclusive to (r,r) exclusive" (repeatedly 2 #(rand r))) + (defn monte-carlo [n r] "take the number of random points and radius return an estimate to pi" @@ -22,11 +24,12 @@ pi" (if (in-circle? 
(rand-point r) r) (inc count) count)))))) + (defn -main [] (let [constant-pi Math/PI computed-pi (monte-carlo 10000000 2) ;; this may take some time on lower end machines difference (Math/abs (- constant-pi computed-pi)) error (* 100 (/ difference constant-pi))] (println "world's PI: " constant-pi - ",our PI: " (double computed-pi) + ",our PI: " (double computed-pi) ",error: " error))) diff --git a/contents/monte_carlo_integration/code/coconut/monte_carlo.coco b/contents/monte_carlo_integration/code/coconut/monte_carlo.coco new file mode 100644 index 000000000..7d58c077c --- /dev/null +++ b/contents/monte_carlo_integration/code/coconut/monte_carlo.coco @@ -0,0 +1,28 @@ +import math +import random + +data point(x, y): + def __abs__(self) = (self.x, self.y) |> map$(pow$(?, 2)) |> sum |> math.sqrt + +def in_circle(point(p), radius = 1): + """Return True if the point is in the circle and False otherwise.""" + return abs(p) < radius + +def monte_carlo(n_samples, radius = 1) = (range(n_samples) + |> map$(-> point(random.uniform(0, radius), random.uniform(0, radius))) + |> filter$(in_circle$(?, radius)) + |> tuple + |> len) * 4 / n_samples + +if __name__ == '__main__': + + samples = 100_000 + + print(f"Using {samples:_} samples.") + + pi_estimate = monte_carlo(samples) + percent_error = 100*abs(math.pi - pi_estimate)/math.pi + + print("The estimate of pi is: {:.3f}".format(pi_estimate)) + print("The percent error is: {:.3f}".format(percent_error)) + diff --git a/contents/monte_carlo_integration/code/c++/monte_carlo.cpp b/contents/monte_carlo_integration/code/cpp/monte_carlo.cpp similarity index 88% rename from contents/monte_carlo_integration/code/c++/monte_carlo.cpp rename to contents/monte_carlo_integration/code/cpp/monte_carlo.cpp index beff97170..4a600d72e 100644 --- a/contents/monte_carlo_integration/code/c++/monte_carlo.cpp +++ b/contents/monte_carlo_integration/code/cpp/monte_carlo.cpp @@ -37,12 +37,7 @@ double monte_carlo_pi(unsigned samples) { } int main() { - 
unsigned samples; - - std::cout << "Enter samples to use: "; - std::cin >> samples; - - double pi_estimate = monte_carlo_pi(samples); + double pi_estimate = monte_carlo_pi(10000000); std::cout << "Pi = " << pi_estimate << '\n'; std::cout << "Percent error is: " << 100 * std::abs(pi_estimate - PI) / PI << " %\n"; } diff --git a/contents/monte_carlo_integration/code/julia/monte_carlo.jl b/contents/monte_carlo_integration/code/julia/monte_carlo.jl index 51b9335a8..84578d15a 100644 --- a/contents/monte_carlo_integration/code/julia/monte_carlo.jl +++ b/contents/monte_carlo_integration/code/julia/monte_carlo.jl @@ -23,9 +23,9 @@ function monte_carlo(n::Int64) # The formula is pi = (box_length^2 / radius^2) * (pi_count / n), but we # are only using the upper quadrant and the unit circle, so we can use # 4*pi_count/n instead - pi_estimate = 4*pi_count/n - println("The pi estimate is: ", pi_estimate) - println("Percent error is: ", signif(100 * abs(pi_estimate - pi) / pi, 3), " %") + return 4*pi_count/n end -monte_carlo(10000000) +pi_estimate = monte_carlo(10000000) +println("The pi estimate is: ", pi_estimate) +println("Percent error is: ", 100 * abs(pi_estimate - pi) / pi, " %") diff --git a/contents/monte_carlo_integration/code/lua/monte_carlo.lua b/contents/monte_carlo_integration/code/lua/monte_carlo.lua index 99fcc4358..69ba78a0c 100644 --- a/contents/monte_carlo_integration/code/lua/monte_carlo.lua +++ b/contents/monte_carlo_integration/code/lua/monte_carlo.lua @@ -1,16 +1,22 @@ +-- function to determine whether an x, y point is in the unit circle local function in_circle(x, y) - return x*x + y*y <= 1 + return x*x + y*y < 1 end +-- function to integrate a unit circle to find pi via monte_carlo function monte_carlo(nsamples) local count = 0 - + for i = 1,nsamples do if in_circle(math.random(), math.random()) then count = count + 1 end end - + + -- This is using a quarter of the unit sphere in a 1x1 box. 
+ -- The formula is pi = (box_length^2 / radius^2) * (pi_count / n), but we + -- are only using the upper quadrant and the unit circle, so we can use + -- 4*pi_count/n instead return 4 * count/nsamples end diff --git a/contents/monte_carlo_integration/code/matlab/monte.m b/contents/monte_carlo_integration/code/matlab/monte.m new file mode 100644 index 000000000..dc0ee8092 --- /dev/null +++ b/contents/monte_carlo_integration/code/matlab/monte.m @@ -0,0 +1,21 @@ +pi_estimate = monte_carlo(10000000); + +fprintf("The pi estimate is: %f\n", pi_estimate); +fprintf("Percent error is: %f%%\n", 100 * abs(pi_estimate - pi) / pi); + +function pi_estimate=monte_carlo(n) + + % a 2 by n array, rows are xs and ys + xy_array = rand(2, n); + + % square every element in the array + squares_array = xy_array.^2; + + % sum the xs and ys and check if it's in the quarter circle + incircle_array = sum(squares_array)<1; + + % determine the average number of points in the circle + pi_estimate = 4*sum(incircle_array)/n; + +end + diff --git a/contents/monte_carlo_integration/code/powershell/MonteCarlo.ps1 b/contents/monte_carlo_integration/code/powershell/MonteCarlo.ps1 new file mode 100644 index 000000000..fe80d6db0 --- /dev/null +++ b/contents/monte_carlo_integration/code/powershell/MonteCarlo.ps1 @@ -0,0 +1,21 @@ +function Is-InCircle($x, $y, $radius=1) { + return ([Math]::Pow($x, 2) + [Math]::Pow($y, 2)) -lt [Math]::Pow($radius, 2) +} + +function Monte-Carlo([int]$n) { + $PiCount = 0; + for ($i = 0; $i -lt $n; $i++) { + $x = Get-Random -Minimum 0.0 -Maximum 1.0 + $y = Get-Random -Minimum 0.0 -Maximum 1.0 + + if (Is-InCircle $x $y) { + $PiCount++ + } + } + return 4.0 * $PiCount / $n +} + +# This could take some time +$PiEstimate = Monte-Carlo 10000000 +Write-Host "The pi estimate is: $PiEstimate" +Write-Host "Percent error is: $(100 * [Math]::Abs($PiEstimate - ([Math]::PI)) / ([Math]::PI))" \ No newline at end of file diff --git a/contents/monte_carlo_integration/code/python/monte_carlo.py 
b/contents/monte_carlo_integration/code/python/monte_carlo.py index 041839dc2..c21bb054e 100644 --- a/contents/monte_carlo_integration/code/python/monte_carlo.py +++ b/contents/monte_carlo_integration/code/python/monte_carlo.py @@ -19,7 +19,7 @@ def monte_carlo(n_samples, radius = 1): if(in_circle(x, y, radius)): in_circle_count += 1 - # Since we've generated points in upper left quadrant ([0,radius], [0, radius]) + # Since we've generated points in upper right quadrant ([0,radius], [0, radius]) # We need to multiply the number of points by 4 pi_estimate = 4 * in_circle_count / (n_samples) diff --git a/contents/monte_carlo_integration/code/racket/monte_carlo.rkt b/contents/monte_carlo_integration/code/racket/monte_carlo.rkt index 0f652db93..0278e7ed6 100755 --- a/contents/monte_carlo_integration/code/racket/monte_carlo.rkt +++ b/contents/monte_carlo_integration/code/racket/monte_carlo.rkt @@ -1,22 +1,25 @@ -#lang racket -(define (in_circle x y) - (< (+ (sqr x) (sqr y)) 1) - ) +#lang racket/base -(define (monte_carlo_pi n) - (* (/ (local ((define (monte_carlo* n count) +(require racket/local) +(require racket/math) + +(define (in-circle x y) + "Checks if a point is in a unit circle" + (< (+ (sqr x) (sqr y)) 1)) + +(define (monte-carlo-pi n) + "Returns an approximation of pi" + (* (/ (local ((define (monte-carlo-pi* n count) (if (= n 0) count - (monte_carlo_pi* (sub1 n) - (if (in_circle (random) (random)) - (add1 count) - count - ) - ) - ) - )) (monte_carlo_pi* n 0) - ) n) 4) - ) - + (monte-carlo-pi* (sub1 n) + (if (in-circle (random) (random)) + (add1 count) + count))))) + (monte-carlo-pi* n 0)) n) 4)) -(display (monte_carlo_pi 1000)) +(define nsamples 5000000) +(define pi-estimate (monte-carlo-pi nsamples)) +(displayln (string-append "Estimate (rational): " (number->string pi-estimate))) +(displayln (string-append "Estimate (float): " (number->string (real->single-flonum pi-estimate)))) +(displayln (string-append "Error:" (number->string (* (/ (abs (- pi-estimate 
pi)) pi) 100)))) diff --git a/contents/monte_carlo_integration/code/rust/Cargo.toml b/contents/monte_carlo_integration/code/rust/Cargo.toml new file mode 100644 index 000000000..17ff7f385 --- /dev/null +++ b/contents/monte_carlo_integration/code/rust/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "montecarlo" +version = "0.1.0" +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +rand = "0.8.4" + +[[bin]] +path = "./monte_carlo.rs" +name = "main" \ No newline at end of file diff --git a/contents/monte_carlo_integration/code/scratch/Algorithm.svg b/contents/monte_carlo_integration/code/scratch/Algorithm.svg new file mode 100644 index 000000000..7472416c7 --- /dev/null +++ b/contents/monte_carlo_integration/code/scratch/Algorithm.svg @@ -0,0 +1 @@ +defineMonteCarlowithpointnumbernumsetCountto0repeatnumsetxtopickrandom0.0to1.0setytopickrandom0.0to1.0ifx*x+y*y&lt;radius*radiusthenchangeCountby1 the radius is 1setEstPIto4*Count/numwhenclickedsetPIto3.1415926535897932384626433832795028841971493993askNumber of point?andwaitMonteCarlowithpointnumberanswersayPi estimate done.for2secondssayjoinEstimate pi: EstPIfor2secondssayjoinReal Pi: PIfor2secondssayjoinPercent Error: joinabsofEstPI-PI/PI*100%for2seconds \ No newline at end of file diff --git a/contents/monte_carlo_integration/code/scratch/InCircle.svg b/contents/monte_carlo_integration/code/scratch/InCircle.svg new file mode 100644 index 000000000..7eab70894 --- /dev/null +++ b/contents/monte_carlo_integration/code/scratch/InCircle.svg @@ -0,0 +1 @@ +x*x+y*y<radius*radius \ No newline at end of file diff --git a/contents/monte_carlo_integration/code/scratch/monte_carlo.txt b/contents/monte_carlo_integration/code/scratch/monte_carlo.txt new file mode 100644 index 000000000..117ee78a2 --- /dev/null +++ b/contents/monte_carlo_integration/code/scratch/monte_carlo.txt @@ -0,0 +1,22 @@ +<(((x) * (x)) + ((y) * (y))) < ((radius) * (radius)) > + 
+ +define Monte Carlo with point number (num) +set [Count v] to (0) +repeat (num) + set [x v] to (pick random (0.0) to (1.0)) + set [y v] to (pick random (0.0) to (1.0)) + if <(((x) * (x)) + ((y) * (y))) < ((radius) * (radius)) > then // the radius is 1 + change [Count] by (1) + end +end +set [EstPI v] to ((4) * ((Count) / (num))) + +when @greenflag clicked +set [PI v] to (3.1415926535897932384626433832795028841971493993) +ask [Number of point?] and wait +Monte Carlo with point number (answer) +say [Pi estimate done.] for (2) seconds +say (join [Estimate pi: ] (EstPI)) for (2) seconds +say (join [Real Pi: ] (PI)) for (2) seconds +say (join [Percent Error: ] (join ((([abs v] of ((EstPI) - (PI))) / (PI)) * (100)) [%])) for (2) seconds \ No newline at end of file diff --git a/contents/monte_carlo_integration/monte_carlo_integration.md b/contents/monte_carlo_integration/monte_carlo_integration.md index c3a50dd36..5a96fc1ef 100644 --- a/contents/monte_carlo_integration/monte_carlo_integration.md +++ b/contents/monte_carlo_integration/monte_carlo_integration.md @@ -14,7 +14,7 @@ If we embed a circle into the square with a radius $$r = \tfrac{length}{2}$$ (sh For simplicity, we can also say that $$\text{Area}_{\text{square}}=4r^2$$.

- +

Now, let's say we want to find the area of the circle without an equation. @@ -44,7 +44,7 @@ each point is tested to see whether it's in the circle or not: {% sample lang="c" %} [import:7-9, lang:"c"](code/c/monte_carlo.c) {% sample lang="cpp" %} -[import:7-16, lang:"cpp"](code/c++/monte_carlo.cpp) +[import:7-16, lang:"cpp"](code/cpp/monte_carlo.cpp) {% sample lang="js" %} [import:2-6, lang:"javascript"](code/javascript/monte_carlo.js) {% sample lang="hs" %} @@ -78,9 +78,9 @@ each point is tested to see whether it's in the circle or not: {% sample lang="php" %} [import:4-7, lang:"php"](code/php/monte_carlo.php) {% sample lang="lua" %} -[import:1-3, lang="lua"](code/lua/monte_carlo.lua) +[import:2-4, lang="lua"](code/lua/monte_carlo.lua) {% sample lang="racket" %} -[import:2-4, lang:"lisp"](code/racket/monte_carlo.rkt) +[import:6-8, lang:"racket"](code/racket/monte_carlo.rkt) {% sample lang="scala" %} [import:3-3, lang:"scala"](code/scala/monte_carlo.scala) {% sample lang="lisp" %} @@ -91,6 +91,16 @@ each point is tested to see whether it's in the circle or not: [import:2-10, lang:"bash"](code/bash/monte_carlo.bash) {% sample lang="kotlin" %} [import:3-3, lang:"kotlin"](code/kotlin/MonteCarlo.kt) +{% sample lang="m" %} +[import:8-15, lang:"matlab"](code/matlab/monte.m) +{% sample lang="scratch" %} +

+ +

+{% sample lang="coco" %} +[import:4-9, lang:"coconut"](code/coconut/monte_carlo.coco) +{% sample lang="ps1" %} +[import:1-3, lang:"powershell"](code/powershell/MonteCarlo.ps1) {% endmethod %} If it's in the circle, we increase an internal count by one, and in the end, @@ -102,7 +112,7 @@ $$ If we use a small number of points, this will only give us a rough approximation, but as we start adding more and more points, the approximation becomes much, much better (as shown below)!

- +

The true power of Monte Carlo comes from the fact that it can be used to integrate literally any object that can be embedded into the square. @@ -115,7 +125,7 @@ I can guarantee that we will see similar methods crop up all over the place in t Here is a video describing Monte Carlo integration:
- +
## Example Code @@ -137,18 +147,18 @@ Feel free to submit your version via pull request, and thanks for reading! {% sample lang="c" %} [import, lang:"c"](code/c/monte_carlo.c) {% sample lang="cpp" %} -[import, lang:"cpp"](code/c++/monte_carlo.cpp) +[import, lang:"cpp"](code/cpp/monte_carlo.cpp) {% sample lang="js" %} [import, lang:"javascript"](code/javascript/monte_carlo.js) {% sample lang="hs" %} [import, lang:"haskell"](code/haskell/monteCarlo.hs) -{%sample lang="rs" %} +{% sample lang="rs" %} [import, lang:"rust"](code/rust/monte_carlo.rs) -{%sample lang="d" %} +{% sample lang="d" %} [import, lang:"d"](code/d/monte_carlo.d) -{%sample lang="go" %} +{% sample lang="go" %} [import, lang:"go"](code/go/monteCarlo.go) -{%sample lang="r" %} +{% sample lang="r" %} [import, lang:"r"](code/r/monte_carlo.R) {% sample lang="java" %} [import, lang:"java"](code/java/MonteCarlo.java) @@ -178,7 +188,7 @@ Feel free to submit your version via pull request, and thanks for reading! {% sample lang="lua" %} [import, lang="lua"](code/lua/monte_carlo.lua) {% sample lang="racket" %} -[import, lang:"lisp"](code/racket/monte_carlo.rkt) +[import, lang:"racket"](code/racket/monte_carlo.rkt) {% sample lang="scala" %} [import, lang:"scala"](code/scala/monte_carlo.scala) {% sample lang="lisp" %} @@ -189,6 +199,17 @@ Feel free to submit your version via pull request, and thanks for reading! [import, lang:"bash"](code/bash/monte_carlo.bash) {% sample lang="kotlin" %} [import, lang:"kotlin"](code/kotlin/MonteCarlo.kt) +{% sample lang="m" %} +[import, lang:"matlab"](code/matlab/monte.m) +{% sample lang="scratch" %} +The code snippets were taken from this [scratch project](https://scratch.mit.edu/projects/319610349) +

+ +

+{% sample lang="coco" %} +[import, lang:"coconut"](code/coconut/monte_carlo.coco) +{% sample lang="ps1" %} +[import, lang:"powershell"](code/powershell/MonteCarlo.ps1) {% endmethod %} + +## License + +##### Images/Graphics + +- The image "[Frequency distribution of a double die roll](res/double_die_frequencies.png)" was created by [K. Shudipto Amin](https://github.com/shudipto-amin) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +- The image "[Probability Density](res/normal_distribution.png)" was created by [K. Shudipto Amin](https://github.com/shudipto-amin) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +##### Text + +The text of this chapter was written by [K. Shudipto Amin](https://github.com/shudipto-amin) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). + +[

](https://creativecommons.org/licenses/by-sa/4.0/) + +##### Pull Requests + +After initial licensing ([#560](https://github.com/algorithm-archivists/algorithm-archive/pull/560)), the following pull requests have modified the text or graphics of this chapter: +- none + diff --git a/contents/probability_distributions/res/double_die_frequencies.png b/contents/probability_distributions/res/double_die_frequencies.png new file mode 100644 index 000000000..874633331 Binary files /dev/null and b/contents/probability_distributions/res/double_die_frequencies.png differ diff --git a/contents/probability_distributions/res/normal_distribution.png b/contents/probability_distributions/res/normal_distribution.png new file mode 100644 index 000000000..36ea67790 Binary files /dev/null and b/contents/probability_distributions/res/normal_distribution.png differ diff --git a/contents/quantum_information/quantum_information.md b/contents/quantum_information/quantum_information.md index 56636b724..7916d36b7 100644 --- a/contents/quantum_information/quantum_information.md +++ b/contents/quantum_information/quantum_information.md @@ -33,7 +33,7 @@ As always, this section will be updated as we add more algorithms to the list. ##### Code Examples -The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). 
##### Text diff --git a/contents/quantum_systems/code/c++/energy.cpp b/contents/quantum_systems/code/c++/energy.cpp deleted file mode 100644 index 15a58bd01..000000000 --- a/contents/quantum_systems/code/c++/energy.cpp +++ /dev/null @@ -1,59 +0,0 @@ -#include -#include - -#include - -void fft(std::vector> &x, bool inverse) { - std::vector> y(x.size(), std::complex(0.0, 0.0)); - - fftw_plan p; - - fftw_complex *in = reinterpret_cast(x.data()); - fftw_complex *out = reinterpret_cast(y.data()); - - p = fftw_plan_dft_1d(x.size(), in, out, - (inverse ? FFTW_BACKWARD : FFTW_FORWARD), FFTW_ESTIMATE); - - - fftw_execute(p); - fftw_destroy_plan(p); - - for (size_t i = 0; i < x.size(); ++i) { - x[i] = y[i] / sqrt(static_cast(x.size())); - } -} - -double calculate_energy(std::vector> wfc, - std::vector> h_r, - std::vector> h_k, - double dx, size_t size) { - std::vector> wfc_k(wfc); - std::vector> wfc_c(size); - fft(wfc_k, false); - - for (size_t i = 0; i < size; ++i) { - wfc_c[i] = conj(wfc[i]); - } - - std::vector> energy_k(size); - std::vector> energy_r(size); - - for (size_t i = 0; i < size; ++i) { - energy_k[i] = wfc_k[i] * pow(h_k[i], 2); - } - - fft(energy_k, true); - - for (size_t i = 0; i < size; ++i) { - energy_k[i] *= 0.5 * wfc_c[i]; - energy_r[i] = wfc_c[i] * h_r[i] * wfc[i]; - } - - double energy_final = 0; - - for (size_t i = 0; i < size; ++i) { - energy_final += real(energy_k[i] + energy_r[i]); - } - - return energy_final * dx; -} diff --git a/contents/quantum_systems/code/c/energy.c b/contents/quantum_systems/code/c/energy.c deleted file mode 100644 index 9086ffcd5..000000000 --- a/contents/quantum_systems/code/c/energy.c +++ /dev/null @@ -1,61 +0,0 @@ -#include -#include -#include -#include - -#include - -void fft(double complex *x, int n, bool inverse) { - double complex y[n]; - memset(y, 0, sizeof(y)); - fftw_plan p; - - if (inverse) { - p = fftw_plan_dft_1d(n, (fftw_complex*)x, (fftw_complex*)y, - FFTW_BACKWARD, FFTW_ESTIMATE); - } else { - p = 
fftw_plan_dft_1d(n, (fftw_complex*)x, (fftw_complex*)y, - FFTW_FORWARD, FFTW_ESTIMATE); - } - - fftw_execute(p); - fftw_destroy_plan(p); - - for (size_t i = 0; i < n; ++i) { - x[i] = y[i] / sqrt((double)n); - } -} - -double calculate_energy(double complex *wfc, double complex *h_r, - double complex *h_k, double dx, size_t size) { - double complex wfc_k[size]; - double complex wfc_c[size]; - memcpy(wfc_k, wfc, sizeof(wfc_k)); - fft(wfc_k, size, false); - - for (size_t i = 0; i < size; ++i) { - wfc_c[i] = conj(wfc[i]); - } - - double complex energy_k[size]; - double complex energy_r[size]; - - for (size_t i = 0; i < size; ++i) { - energy_k[i] = wfc_k[i] * h_k[i]; - } - - fft(energy_k, size, true); - - for (size_t i = 0; i < size; ++i) { - energy_k[i] *= wfc_c[i]; - energy_r[i] = wfc_c[i] * h_r[i] * wfc[i]; - } - - double energy_final = 0; - - for (size_t i = 0; i < size; ++i) { - energy_final += creal(energy_k[i] + energy_r[i]); - } - - return energy_final * dx; -} diff --git a/contents/quantum_systems/code/haskell/Energy.hs b/contents/quantum_systems/code/haskell/Energy.hs deleted file mode 100644 index a024fd139..000000000 --- a/contents/quantum_systems/code/haskell/Energy.hs +++ /dev/null @@ -1,14 +0,0 @@ -import Data.Array.CArray -import Data.Complex -import Math.FFT (dft, idft) -- Binding to fftw - -type Vector = CArray Int (Complex Double) - -calculateEnergy :: Double -> Vector -> Vector -> Vector -> Double -calculateEnergy dx kin pot wfc = (* dx) . sum . 
map realPart $ elems total - where - total = liftArray2 (+) kineticE potentialE - potentialE = wfcConj .* pot .* wfc - kineticE = wfcConj .* idft (kin .* dft wfc) - wfcConj = liftArray conjugate wfc - a .* b = liftArray2 (*) a b diff --git a/contents/quantum_systems/code/julia/energy.jl b/contents/quantum_systems/code/julia/energy.jl deleted file mode 100644 index 3efce0cb7..000000000 --- a/contents/quantum_systems/code/julia/energy.jl +++ /dev/null @@ -1,18 +0,0 @@ -# We are calculating the energy to check -function calculate_energy(wfc, H_k, H_r, dx) - # Creating momentum and conjugate wavefunctions - wfc_k = fft(wfc) - wfc_c = conj(wfc) - - # Finding the momentum and real-space energy terms - energy_k = wfc_c.*ifft((H_k) .* wfc_k) - energy_r = wfc_c.* H_r .* wfc - - # Integrating over all space - energy_final = 0 - for i = 1:length(energy_k) - energy_final += real(energy_k[i] + energy_r[i]) - end - - return energy_final*dx -end diff --git a/contents/quantum_systems/code/python/energy.py b/contents/quantum_systems/code/python/energy.py deleted file mode 100644 index 328fa9950..000000000 --- a/contents/quantum_systems/code/python/energy.py +++ /dev/null @@ -1,17 +0,0 @@ -import numpy as np - - -def calculate_energy(wfc, H_k, H_r, dx): - """Calculate the energy .""" - # Creating momentum conjugate wavefunctions - wfc_k = np.fft.fft(wfc) - wfc_c = np.conj(wfc) - - # Finding the momentum and real-space energy terms - energy_k = 0.5 * wfc_c * np.fft.ifft((H_k ** 2) * wfc_k) - energy_r = wfc_c * H_r * wfc - - # Integrating over all space - energy_final = sum(energy_k + energy_r).real - - return energy_final * dx diff --git a/contents/quantum_systems/quantum_systems.md b/contents/quantum_systems/quantum_systems.md index e28816818..71eb9305a 100644 --- a/contents/quantum_systems/quantum_systems.md +++ b/contents/quantum_systems/quantum_systems.md @@ -3,12 +3,12 @@ As I am sure you have heard, the quantum world is weird. 
As you deal with progressively smaller and smaller systems, at some point, it becomes less accurate to describe objects as particles. Instead, it is better to describe objects as probability densities. -These densities are easiest to understand in terms of _wavefunctions_, which are complex functions characterizing a quantum system's behaviour. +These densities are easiest to understand in terms of _wavefunctions_, which are complex functions characterizing a quantum system's behavior. Again, this is pretty common knowledge; however, there is a distinct lack of readable literature on how to simulate quantum systems, even though there are numerous methods for exactly that! This section will deal with the computation of quantum states with classical machines. Now, I know what you are thinking, "Wait. Why are we simulating quantum systems on classical computers? Why not simulate it with some sort of experiment or with quantum computers?" -Well, here's where the notation get's really sticky. +Well, here's where the notation gets really sticky. There is a clear difference between quantum computers and quantum simulators. A _quantum computer_ is the quantum analog to a classical computer, replacing bits with qubits by using quantum information theory. @@ -16,7 +16,7 @@ Quantum computers are usually thought of as a way to use quantum mechanics to ev Both Grover's and Shor's algorithms are good examples of cases where quantum computation could greatly change the landscape of modern computation as we know it! _Quantum simulators_ on the other hand are quantum systems used to better understand quantum mechanics. -These will often come in the form of experimental quantum systems that express quantum behaviour and allow us to better understand other areas of quantum systems. +These will often come in the form of experimental quantum systems that express quantum behavior and allow us to better understand other areas of quantum systems. 
In other words, quantum simulators are general techniques to study quantum systems on quantum hardware; however, quantum computers are quantum hardware used for the explicit purpose of quantum computation with qubits. Because supercomputers are not great at performing quantum computations, certain quantum simulators exist as a building block for quantum computation. A _universal quantum simulator_ is often called a quantum computer for this reason. @@ -29,7 +29,7 @@ $$ i \hbar \frac{\partial \Psi(\mathbf{r},t)}{\partial t} = \left[-\frac{\hbar^2}{2m} \nabla^2 + V(\mathbf{r},t) \right] \Psi(\mathbf{r},t) $$ -Where $$\Psi(\mathbf{r},t)$$ is a quantum wavefunction, $$V(\mathbf{r},t)$$ is a _trapping potential_, $$\nabla^2$$ is a _laplacian_, $$\mathbf{r}$$ is some sort of spatial component, and $$t$$ is time. +Where $$\Psi(\mathbf{r},t)$$ is a quantum wavefunction, $$V(\mathbf{r},t)$$ is a _trapping potential_, $$\nabla^2$$ is a _Laplacian_, $$\mathbf{r}$$ is some sort of spatial component, and $$t$$ is time. There is a lot to take in here; however, it's ultimately just some time derivative on the left-hand side and a spatial derivative (with some extra steps) on the right-hand side. In this way, it isn't too different from the diffusion (heat) equation: @@ -95,18 +95,18 @@ In this case, $$\sigma$$ is the standard deviation, $$\mu$$ is the statistical m Ultimately, this means that if we have a higher precision in position space, we will have a lower precision in momentum space. The converse is also true: a higher precision in momentum space will lead to a lower precision in position space. -This makes the most sense if we imagine having a gaussian-like probability density ($$|\Psi(x)|^2$$) in position space, which will provide a gaussian-like density when in momentum space. +This makes the most sense if we imagine having a Gaussian-like probability density ($$|\Psi(x)|^2$$) in position space, which will provide a Gaussian-like density when in momentum space. 
Here, we see that if we have a broader distribution in one space, we must have a thinner distribution in the opposite space, as shown here:

- +

Because the density can be interpreted as "the probability of finding a quantum particle at any provided location in position ($$x_i$$) or momentum ($$k_i$$) space, the interpretation is clear: the more we understand about a particle's position, the less we understand about it's momentum. This is a powerful statement and should be given some thought. -To me, the most interesting part of this description is not the physical interpretation, but the fact that this act of transforming between larger and smaller gaussians is precisely what Fourier transforms do! +To me, the most interesting part of this description is not the physical interpretation, but the fact that this act of transforming between larger and smaller Gaussians is precisely what Fourier transforms do! This further strengthens our argument from before. Position and momentum space are related by the Fourier transform! @@ -226,15 +226,15 @@ This ultimately looks like this: {% method %} {% sample lang="jl" %} -[import, lang:"julia"](code/julia/energy.jl) +[import:114-132, lang:"julia"](../split-operator_method/code/julia/split_op.jl) {% sample lang="hs" %} -[import, lang:"haskell"](code/haskell/Energy.hs) +[import:75-82, lang:"haskell"](../split-operator_method/code/haskell/splitOp.hs) {% sample lang="c" %} -[import:29-, lang:"c"](code/c/energy.c) +[import:150-184, lang:"c"](../split-operator_method/code/c/split_op.c) {% sample lang="cpp" %} -[import:26-, lang:"cpp"](code/c++/energy.cpp) +[import:158-189, lang:"cpp"](../split-operator_method/code/cpp/split_op.cpp) {% sample lang="py" %} -[import:4-17, lang:"python"](code/python/energy.py) +[import:98-112, lang:"python"](../split-operator_method/code/python/split_op.py) {% endmethod %} This calculation will be used in many different simulations of quantum systems to check our results. 
@@ -260,7 +260,7 @@ MathJax.Hub.Queue(["Typeset",MathJax.Hub]); ##### Code Examples -The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). +The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/main/LICENSE.md)). ##### Text @@ -269,7 +269,7 @@ The text of this chapter was written by [James Schloss](https://github.com/leios [

](https://creativecommons.org/licenses/by-sa/4.0/) ##### Images/Graphics -- The animation "[FTgaussian](res/gaussian.gif)" was created by [James Schloss](https://github.com/leios) and is licenced under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). +- The animation "[FTgaussian](res/gaussian.gif)" was created by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). ##### Pull Requests diff --git a/contents/sorting_and_searching/sorting_and_searching.md b/contents/sorting_and_searching/sorting_and_searching.md deleted file mode 100644 index d6f9aab8b..000000000 --- a/contents/sorting_and_searching/sorting_and_searching.md +++ /dev/null @@ -1,32 +0,0 @@ -# Sorting and Searching -When it comes to algorithms that programmers know and love, the stereotypical examples are often algorithms that either sort or search through a given container of elements. -For the most part, sorting and searching methods are two sides to the same coin and are almost always described together due to their similarities. -To be honest, they are also good thought exercises for new programmers. -It's often inspiring to hear how individuals who have no programming experience go about solving the problem, because there are so many ways to do it! - -All said, sorting and searching are fundamental to numerous algorithms and really show how computer science has evolved with time to lower the complexity of problems that seem straightforward at first-glance. - -As the Archive evolves, more and more sorting algorithms will be added, so feel free to let me know your favorite algorithm and I'll do my best to put it in! -Right now, there are only a few (straightforward) examples, so expect some more complicated algorithms in the future! 
- - - - -## License - -##### Code Examples - -The code examples are licensed under the MIT license (found in [LICENSE.md](https://github.com/algorithm-archivists/algorithm-archive/blob/master/LICENSE.md)). - -##### Text - -The text of this chapter was written by [James Schloss](https://github.com/leios) and is licensed under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). - -[

](https://creativecommons.org/licenses/by-sa/4.0/) - -##### Pull Requests - -After initial licensing ([#560](https://github.com/algorithm-archivists/algorithm-archive/pull/560)), the following pull requests have modified the text or graphics of this chapter: -- none diff --git a/contents/split-operator_method/code/c/SConscript b/contents/split-operator_method/code/c/SConscript new file mode 100644 index 000000000..bb40f4a85 --- /dev/null +++ b/contents/split-operator_method/code/c/SConscript @@ -0,0 +1,6 @@ +Import('files_to_compile env') + +for file_info in files_to_compile: + build_target = f'#/build/{file_info.language}/{file_info.chapter}/{file_info.path.stem}' + build_result = env.C(build_target, str(file_info.path), LIBS=['m', 'fftw3']) + env.Alias(str(file_info.chapter), build_result) \ No newline at end of file diff --git a/contents/split-operator_method/code/c/split_op.c b/contents/split-operator_method/code/c/split_op.c index 0550e4ef2..fd5a84001 100644 --- a/contents/split-operator_method/code/c/split_op.c +++ b/contents/split-operator_method/code/c/split_op.c @@ -28,16 +28,16 @@ struct operators { double complex *wfc; }; -void fft(double complex *x, int n, bool inverse) { +void fft(double complex *x, size_t n, bool inverse) { double complex y[n]; memset(y, 0, sizeof(y)); fftw_plan p; if (inverse) { - p = fftw_plan_dft_1d(n, (fftw_complex*)x, (fftw_complex*)y, + p = fftw_plan_dft_1d((int)n, (fftw_complex*)x, (fftw_complex*)y, FFTW_BACKWARD, FFTW_ESTIMATE); } else { - p = fftw_plan_dft_1d(n, (fftw_complex*)x, (fftw_complex*)y, + p = fftw_plan_dft_1d((int)n, (fftw_complex*)x, (fftw_complex*)y, FFTW_FORWARD, FFTW_ESTIMATE); } @@ -63,9 +63,9 @@ void init_params(struct params *par, double xmax, unsigned int res, double dt, par->im_time = im; for (size_t i = 0; i < res; ++i) { - par->x[i] = xmax / res - xmax + i * (2.0 * xmax / res); + par->x[i] = xmax / res - xmax + (double)i * (2.0 * xmax / res); if (i < res / 2) { - par->k[i] = i * M_PI / xmax; + par->k[i] 
= (double)i * M_PI / xmax; } else { par->k[i] = ((double)i - res) * M_PI / xmax; } @@ -139,8 +139,8 @@ void split_op(struct params par, struct operators opr) { sprintf(filename, "output%lu.dat", i); FILE *fp = fopen(filename, "w"); - for (int i = 0; i < opr.size; ++i) { - fprintf(fp, "%d\t%f\t%f\n", i, density[i], creal(opr.v[i])); + for (size_t i = 0; i < opr.size; ++i) { + fprintf(fp, "%ld\t%f\t%f\n", i, density[i], creal(opr.v[i])); } fclose(fp); diff --git a/contents/split-operator_method/code/cpp/SConscript b/contents/split-operator_method/code/cpp/SConscript new file mode 100644 index 000000000..f9ec1b545 --- /dev/null +++ b/contents/split-operator_method/code/cpp/SConscript @@ -0,0 +1,6 @@ +Import('files_to_compile env') + +for file_info in files_to_compile: + build_target = f'#/build/{file_info.language}/{file_info.chapter}/{file_info.path.stem}' + build_result = env.CPlusPlus(build_target, str(file_info.path), LIBS=['m', 'fftw3']) + env.Alias(str(file_info.chapter), build_result) diff --git a/contents/split-operator_method/code/c++/split_op.cpp b/contents/split-operator_method/code/cpp/split_op.cpp similarity index 93% rename from contents/split-operator_method/code/c++/split_op.cpp rename to contents/split-operator_method/code/cpp/split_op.cpp index 74f8df2b7..bb160c901 100644 --- a/contents/split-operator_method/code/c++/split_op.cpp +++ b/contents/split-operator_method/code/cpp/split_op.cpp @@ -28,9 +28,9 @@ struct Params { im_time = im; for (size_t i = 0; i < res; ++i) { - x.emplace_back(xmax / res - xmax + i * (2.0 * xmax / res)); + x.emplace_back(xmax / res - xmax + static_cast(i) * (2.0 * xmax / res)); if (i < res / 2) { - k.push_back(i * M_PI / xmax); + k.push_back(static_cast(i) * M_PI / xmax); } else { k.push_back((static_cast(i) - res) * M_PI / xmax); } @@ -85,7 +85,7 @@ void fft(vector_complex &x, bool inverse) { fftw_complex *in = reinterpret_cast(x.data()); fftw_complex *out = reinterpret_cast(y.data()); - p = fftw_plan_dft_1d(x.size(), in, 
out, + p = fftw_plan_dft_1d(static_cast(x.size()), in, out, (inverse ? FFTW_BACKWARD : FFTW_FORWARD), FFTW_ESTIMATE); fftw_execute(p); @@ -97,7 +97,7 @@ void fft(vector_complex &x, bool inverse) { } void split_op(Params &par, Operators &opr) { - double density[opr.size]; + auto density = std::vector(opr.size, 0); for (size_t i = 0; i < par.timesteps; ++i) { for (size_t j = 0; j < opr.size; ++j) { @@ -142,7 +142,7 @@ void split_op(Params &par, Operators &opr) { std::ofstream fstream = std::ofstream(filename_stream.str()); if (fstream) { - for (int i = 0; i < opr.size; ++i) { + for (std::size_t i = 0; i < opr.size; ++i) { std::stringstream data_stream; data_stream << i << "\t" << density[i] << "\t" << real(opr.v[i]) << "\n"; diff --git a/contents/split-operator_method/code/rust/Cargo.toml b/contents/split-operator_method/code/rust/Cargo.toml new file mode 100644 index 000000000..def85c23e --- /dev/null +++ b/contents/split-operator_method/code/rust/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "splitop" +version = "0.1.0" +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +rustfft = "4.1.0" + +[[bin]] +path = "./split_op.rs" +name = "main" \ No newline at end of file diff --git a/contents/split-operator_method/code/rust/split_op.rs b/contents/split-operator_method/code/rust/split_op.rs new file mode 100644 index 000000000..d29e16a2e --- /dev/null +++ b/contents/split-operator_method/code/rust/split_op.rs @@ -0,0 +1,196 @@ +extern crate rustfft; + +use rustfft::num_complex::Complex; +use rustfft::FFTplanner; +use std::f64::consts::PI; +use std::fs::File; +use std::io::Write; +use std::path::Path; + +// This implementation is based on the C and C++ implementations. 
+ +#[derive(Clone)] +struct Parameters { + xmax: f64, + res: usize, + dt: f64, + timesteps: usize, + dx: f64, + x: Vec, + dk: f64, + k: Vec, + im_time: bool, +} + +impl Parameters { + pub fn new(xmax: f64, res: usize, dt: f64, timesteps: usize, im_time: bool) -> Parameters { + let dx = 2.0_f64 * xmax / (res as f64); + let mut x: Vec = Vec::with_capacity(res); + let dk = PI / xmax; + let mut k: Vec = Vec::with_capacity(res); + for i in 0..res { + x.push(xmax / (res as f64) - xmax + (i as f64) * dx); + match i { + i if (i < res / 2) => k.push((i as f64) * PI / xmax), + _ => k.push(((i as f64) - (res as f64)) * PI / xmax), + } + } + Parameters { + xmax, + res, + dt, + timesteps, + im_time, + dx, + x, + dk, + k, + } + } +} + +struct Operators { + v: Vec>, + pe: Vec>, + ke: Vec>, + wfc: Vec>, +} + +impl Operators { + pub fn new(par: &Parameters, v_offset: f64, wfc_offset: f64) -> Operators { + let mut v: Vec> = Vec::with_capacity(par.res); + let mut pe: Vec> = Vec::with_capacity(par.res); + let mut ke: Vec> = Vec::with_capacity(par.res); + let mut wfc: Vec> = Vec::with_capacity(par.res); + + for i in 0..par.res { + v.push(Complex::new( + 0.5_f64 * (par.x[i] - v_offset).powi(2), + 0.0_f64, + )); + wfc.push(Complex::new( + (-((par.x[i] - wfc_offset).powi(2)) / 2.0_f64).exp(), + 0.0_f64, + )); + if par.im_time { + ke.push(Complex::new( + (-0.5_f64 * par.dt * par.k[i].powi(2)).exp(), + 0.0_f64, + )); + pe.push(Complex::new((-0.5_f64 * par.dt * v[i].re).exp(), 0.0_f64)); + } else { + ke.push(Complex::new( + 0.0_f64, + (-0.5_f64 * par.dt * par.k[i].powi(2)).exp(), + )); + pe.push(Complex::new(0.0_f64, (-0.5_f64 * par.dt * v[i].re).exp())); + } + } + Operators { v, pe, ke, wfc } + } +} + +fn fft(x: &mut Vec>, inverse: bool) { + let mut y = vec![Complex::new(0.0_f64, 0.0_f64); x.len()]; + let mut p = FFTplanner::new(inverse); + let fft = p.plan_fft(x.len()); + fft.process(x.as_mut_slice(), y.as_mut_slice()); + + for i in 0..x.len() { + x[i] = y[i] / (x.len() as f64).sqrt(); + } 
+} + +fn split_op(par: &Parameters, opr: &mut Operators) { + let mut density: Vec; + + for i in 0..par.timesteps { + for j in 0..par.res { + opr.wfc[j] *= opr.pe[j]; + } + + fft(&mut opr.wfc, false); + + for j in 0..par.res { + opr.wfc[j] *= opr.ke[j]; + } + + fft(&mut opr.wfc, true); + + for j in 0..par.res { + opr.wfc[j] *= opr.pe[j]; + } + + density = opr.wfc.iter().map(|x| x.norm().powi(2)).collect(); + + if par.im_time { + let sum = density.iter().sum::() * par.dx; + + for j in 0..par.res { + opr.wfc[j] /= sum.sqrt(); + } + } + + // Writing data into a file in the format of: + // index, density, real potential. + let path_name = format!("output{}.dat", i); + let path = Path::new(&path_name); + let display = path.display(); + + let mut file = match File::create(&path) { + Err(why) => panic!("Couldn't create {}: {}", display, why), + Ok(good) => good, + }; + + for j in 0..par.res { + if let Err(why) = writeln!(file, "{}\t{}\t{}", j, density[j], opr.v[j].re) { + panic!("Couldn't write to {}: {}", display, why) + } + if let Err(why) = file.flush() { + panic!("Couldn't flush {}: {}", display, why) + } + } + } +} + +fn calculate_energy(par: &Parameters, opr: &Operators) -> f64 { + let wfc_r = opr.wfc.clone(); + let mut wfc_k = opr.wfc.clone(); + let mut wfc_c = vec![Complex::new(0.0_f64, 0.0_f64); par.res]; + + fft(&mut wfc_k, false); + + for i in 0..par.res { + wfc_c[i] = wfc_r[i].conj(); + } + + let mut energy_k = vec![Complex::new(0.0_f64, 0.0_f64); par.res]; + let mut energy_r = vec![Complex::new(0.0_f64, 0.0_f64); par.res]; + + for i in 0..par.res { + energy_k[i] = wfc_k[i] * Complex::new(par.k[i], 0.0_f64).powi(2); + } + + fft(&mut energy_k, true); + + for i in 0..par.res { + energy_k[i] *= wfc_c[i].scale(0.5_f64); + energy_r[i] = wfc_c[i] * opr.v[i] * wfc_r[i]; + } + + let energy_final = energy_k + .into_iter() + .zip(energy_r.into_iter()) + .fold(0.0_f64, |acc, x| acc + (x.0 + x.1).re); + + energy_final * par.dx +} + +fn main() { + let par = 
Parameters::new(5.0, 256, 0.05, 100, true); + let mut opr = Operators::new(&par, 0.0, -1.0); + + split_op(&par, &mut opr); + + println!("The energy is {}", calculate_energy(&par, &opr)); +} diff --git a/contents/split-operator_method/split-operator_method.md b/contents/split-operator_method/split-operator_method.md index 20ae39e7b..08fc3d33d 100644 --- a/contents/split-operator_method/split-operator_method.md +++ b/contents/split-operator_method/split-operator_method.md @@ -6,11 +6,11 @@ $$ i \hbar \frac{\partial \Psi(\mathbf{r},t)}{\partial t} = \left[-\frac{\hbar^2}{2m}\nabla^2 + V(\mathbf{r}) + g|\Psi(\mathbf{r},t)|^2 \right] \Psi(\mathbf{r},t), $$ -which follows from the notation provided in the [quantum systems](../quantum_systems/quantum_systems.md) chapter: $$\Psi(\mathbf{r},t)$$ is a quantum wave-function with spatial ($$\mathbf{r}$$) and time ($$t$$) dependence, $$\nabla^2$$ is a laplacian, and $$V(\mathbf{r})$$ is a potential of some sort (like $$\omega x^2$$ or something). +which follows from the notation provided in the [quantum systems](../quantum_systems/quantum_systems.md) chapter: $$\Psi(\mathbf{r},t)$$ is a quantum wave-function with spatial ($$\mathbf{r}$$) and time ($$t$$) dependence, $$\nabla^2$$ is a Laplacian, and $$V(\mathbf{r})$$ is a potential of some sort (like $$\omega x^2$$ or something). In this case, we also add an interaction term $$g$$ next to a nonlinear $$|\Psi(\mathbf{r},t)|^2$$ term. This is the system I studied for most of my PhD (granted, we played a few tricks with parallelization and such, so it was _slightly_ more complicated). -At its heart, the split-op method is nothing more than a pseudo-spectral differential equation solver... That is to say, it solves the Schrödinger equation with [FFT's](../cooley_tukey/cooley_tukey.md). +At its heart, the split-op method is nothing more than a pseudo-spectral differential equation solver... 
That is to say, it solves the Schrödinger equation with [FFTs](../cooley_tukey/cooley_tukey.md). In fact, there is a large class of spectral and pseudo-spectral methods used to solve a number of different physical systems, and we'll definitely be covering those in the future. As mentioned in the [quantum systems](../quantum_systems/quantum_systems.md) section, we can represent a quantum wavefunction in momentum space, which is parameterized with the wavevector $$k$$. In the Hamiltonian shown above, we can split our system into position space components, $$\hat{H}_r = \left[V(\mathbf{r}) + g|\Psi(\mathbf{r},t)|^2 \right] \Psi(\mathbf{r},t)$$, and momentum space components, $$\hat{H}_k = \left[-\frac{\hbar^2}{2m}\nabla^2 \right]\Psi(\mathbf{r},t)$$. @@ -50,21 +50,21 @@ where $$\hat{U}_r = e^{-\frac{i\hat{H}_rdt}{\hbar}}$$, $$\hat{U}_k = e^{-\frac{i Here's a flowchart of what we are looking for every timestep:

- +

For the most part, that's it: 1. Multiply the wavefunction in real space with the real-space operator. 2. Flip to momentum space with a Fourier transform. -3. Multiply the momentum-space wavefuntion by the momentum-space operator. +3. Multiply the momentum-space wavefunction by the momentum-space operator. 4. Flip to position space with an inverse Fourier transform. 5. Repeat 1-4 until satisfied. -If we guess that our initial wavefunction is gaussian-like and is slightly offset from the center or the trap, this should allow us to see our wavefunction "sloshing" back and forth in our trap, like so: +If we guess that our initial wavefunction is Gaussian-like and is slightly offset from the center of the trap, this should allow us to see our wavefunction "sloshing" back and forth in our trap, like so:

- +

As a small concession, using this method enforces periodic boundary conditions, where the wavefunction will simply slide from one side of your simulation box to the other, but that's fine for most cases. @@ -79,7 +79,7 @@ This means that we can find the ground state of our system by running the simula If we run the same simulation as above in imaginary time, we should see our wavefunction smoothly move to the center of our trap (the lowest energy position), like so:

- +

@@ -103,17 +103,19 @@ Regardless, we first need to set all the initial parameters, including the initi [import:11-21, lang:"c"](code/c/split_op.c) [import:52-73, lang:"c"](code/c/split_op.c) {% sample lang="cpp" %} -[import:14-49, lang:"cpp"](code/c++/split_op.cpp) +[import:14-49, lang:"cpp"](code/cpp/split_op.cpp) {% sample lang="py" %} [import:11-30, lang:"python"](code/python/split_op.py) {% sample lang="hs" %} [import:17-47, lang:"haskell"](code/haskell/splitOp.hs) +{% sample lang="rs" %} +[import:14-51, lang:"rust"](code/rust/split_op.rs) {% endmethod %} As a note, when we generate our grid in momentum space `k`, we need to split the grid into two lines, one that is going from `0` to `-kmax` and is then discontinuous and goes from `kmax` to `0`. This is simply because the FFT will naturally assume that the `0` in our grid is at the left side of the simulation, so we shift k-space to match this expectation. Also, for this code we will be using notation to what we used above: `opr.R` will be the real space operators and `opr.K` will be the momentum space operators. -There is another boolean value here called `im_time`, which is for imaginary time evolution. +There is another Boolean value here called `im_time`, which is for imaginary time evolution. Afterwards, we turn them into operators: @@ -124,17 +126,19 @@ Afterwards, we turn them into operators: [import:23-29, lang:"c"](code/c/split_op.c) [import:75-96, lang:"c"](code/c/split_op.c) {% sample lang="cpp" %} -[import:51-80, lang:"cpp"](code/c++/split_op.cpp) +[import:51-80, lang:"cpp"](code/cpp/split_op.cpp) {% sample lang="py" %} [import:33-54, lang:"python"](code/python/split_op.py) {% sample lang="hs" %} [import:49-66, lang:"haskell"](code/haskell/splitOp.hs) +{% sample lang="rs" %} +[import:53-92, lang:"rust"](code/rust/split_op.rs) {% endmethod %} -Here, we use a standard harmonic potential for the atoms to sit in and a gaussian distribution for an initial guess for the probability distribution. 
-If we give either the trap or the atoms a slight offset (so the gaussian distribution of atoms does not *quite* rest at the bottom of the $$x^2$$ potential, we can see the atoms moving back and forth in the potential as we move the simulation forward in time. +Here, we use a standard harmonic potential for the atoms to sit in and a Gaussian distribution for an initial guess for the probability distribution. +If we give either the trap or the atoms a slight offset (so the Gaussian distribution of atoms does not *quite* rest at the bottom of the $$x^2$$ potential, we can see the atoms moving back and forth in the potential as we move the simulation forward in time. This means that we can easily see the dynamics of our quantum system! -If we run the simulation in imaginary time, we will see the gaussian distribution of atoms move towards the center of the potential, which is the location with the lowest energy. +If we run the simulation in imaginary time, we will see the Gaussian distribution of atoms move towards the center of the potential, which is the location with the lowest energy. Both of these have been shown in the figures above. The final step is to do the iteration, itself. @@ -145,11 +149,13 @@ The final step is to do the iteration, itself. {% sample lang="c" %} [import:98-148, lang:"c"](code/c/split_op.c) {% sample lang="cpp" %} -[import:99-156, lang:"cpp"](code/c++/split_op.cpp) +[import:99-156, lang:"cpp"](code/cpp/split_op.cpp) {% sample lang="py" %} [import:57-95, lang:"python"](code/python/split_op.py) {% sample lang="hs" %} [import:68-73, lang:"haskell"](code/haskell/splitOp.hs) +{% sample lang="rs" %} +[import:105-155, lang:"rust"](code/rust/split_op.rs) {% endmethod %} And that's it. @@ -166,11 +172,11 @@ The Split-Operator method is one of the most commonly used quantum simulation al Here is a video describing the split-operator method:
- +
## Example Code -This example code is a simulation of a gaussian distribution of atoms slightly offset in a harmonic trap in imaginary time. +This example code is a simulation of a Gaussian distribution of atoms slightly offset in a harmonic trap in imaginary time. So long as the code is written appropriately, this means that the atoms should move towards the center of the trap and the energy should decay to $$\frac{1}{2}\hbar\omega$$, which will be simply $$\frac{1}{2}$$ in this simulation. Checking to make sure your code can output the correct energy for a harmonic trap is a good test to make sure it is all working under-the-hood before simulating systems with more complicated Hamiltonians. @@ -180,11 +186,13 @@ Checking to make sure your code can output the correct energy for a harmonic tra {% sample lang="c" %} [import, lang:"c"](code/c/split_op.c) {% sample lang="cpp" %} -[import, lang:"cpp"](code/c++/split_op.cpp) +[import, lang:"cpp"](code/cpp/split_op.cpp) {% sample lang="py" %} [import:5-127, lang:"python"](code/python/split_op.py) {% sample lang="hs" %} [import, lang:"haskell"](code/haskell/splitOp.hs) +{% sample lang="rs" %} +[import, lang:"rust"](code/rust/split_op.rs) {% endmethod %}